You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by jo...@apache.org on 2017/01/19 21:12:20 UTC
[1/3] ambari git commit: AMBARI-19617 - Restarting Some Components
During a Suspended Upgrade Fails Due To Missing Upgrade Parameters
(jonathanhurley)
Repository: ambari
Updated Branches:
refs/heads/trunk de8bf6019 -> d540f943d
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
index 0d1a2fa..7be9419 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
@@ -85,6 +85,7 @@ import com.google.inject.Binder;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
+import com.google.inject.assistedinject.FactoryModuleBuilder;
import com.google.inject.persist.PersistService;
import com.google.inject.util.Modules;
@@ -136,6 +137,7 @@ public class UpgradeHelperTest {
};
MockModule mockModule = new MockModule();
+
// create an injector which will inject the mocks
injector = Guice.createInjector(Modules.override(injectorModule).with(mockModule));
injector.getInstance(GuiceJpaInitializer.class);
@@ -199,9 +201,9 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
+
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -280,9 +282,8 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
context.setSupportedServices(Collections.singleton("ZOOKEEPER"));
context.setScope(UpgradeScope.PARTIAL);
@@ -339,9 +340,9 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
+
context.setResolver(m_masterHostResolver);
context.setSupportedServices(Collections.singleton("ZOOKEEPER"));
context.setScope(UpgradeScope.COMPLETE);
@@ -396,9 +397,8 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -450,9 +450,8 @@ public class UpgradeHelperTest {
// use a "real" master host resolver here so that we can actually test MM
MasterHostResolver masterHostResolver = new MasterHostResolver(null, cluster, "");
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -483,9 +482,8 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -495,7 +493,7 @@ public class UpgradeHelperTest {
UpgradeGroupHolder mastersGroup = groups.get(2);
assertEquals("CORE_MASTER", mastersGroup.name);
- List<String> orderedNameNodes = new LinkedList<String>();
+ List<String> orderedNameNodes = new LinkedList<>();
for (StageWrapper sw : mastersGroup.items) {
if (sw.getType().equals(StageWrapper.Type.RESTART) && sw.getText().toLowerCase().contains("NameNode".toLowerCase())) {
for (TaskWrapper tw : sw.getTasks()) {
@@ -534,9 +532,8 @@ public class UpgradeHelperTest {
assertEquals(1, schs.size());
assertEquals(HostState.HEARTBEAT_LOST, schs.get(0).getHostState());
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -573,9 +570,8 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.DOWNGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(DOWNGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.DOWNGRADE, DOWNGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -615,9 +611,8 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -647,9 +642,8 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -677,9 +671,8 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -692,7 +685,7 @@ public class UpgradeHelperTest {
ConfigureTask configureTask = (ConfigureTask) hiveGroup.items.get(1).getTasks().get(0).getTasks().get(0);
// now change the thrift port to http to have the 2nd condition invoked
- Map<String, String> hiveConfigs = new HashMap<String, String>();
+ Map<String, String> hiveConfigs = new HashMap<>();
hiveConfigs.put("hive.server2.transport.mode", "http");
hiveConfigs.put("hive.server2.thrift.port", "10001");
hiveConfigs.put("condition", "1");
@@ -750,9 +743,8 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade,
@@ -863,9 +855,8 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade,
@@ -927,9 +918,8 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade,
@@ -947,7 +937,7 @@ public class UpgradeHelperTest {
assertEquals(configProperties.get(ConfigureTask.PARAMETER_CONFIG_TYPE), "hive-site");
// now set the property in the if-check in the set element so that we have a match
- Map<String, String> hiveConfigs = new HashMap<String, String>();
+ Map<String, String> hiveConfigs = new HashMap<>();
hiveConfigs.put("fooKey", "THIS-BETTER-CHANGE");
hiveConfigs.put("ifFooKey", "ifFooValue");
ConfigurationRequest configurationRequest = new ConfigurationRequest();
@@ -994,9 +984,8 @@ public class UpgradeHelperTest {
assertNotNull(upgrade);
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -1072,9 +1061,8 @@ public class UpgradeHelperTest {
numServiceChecksExpected++;
}
- UpgradeContext context = new UpgradeContext(c, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_22);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(c, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -1119,9 +1107,8 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.DOWNGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(DOWNGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.DOWNGRADE, DOWNGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -1156,9 +1143,8 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -1242,13 +1228,17 @@ public class UpgradeHelperTest {
StackId stackId = new StackId("HDP-2.1.1");
StackId stackId2 = new StackId("HDP-2.2.0");
+
clusters.addCluster(clusterName, stackId);
Cluster c = clusters.getCluster(clusterName);
helper.getOrCreateRepositoryVersion(stackId,
c.getDesiredStackVersion().getStackVersion());
+
helper.getOrCreateRepositoryVersion(stackId2,"2.2.0");
+ helper.getOrCreateRepositoryVersion(stackId2, UPGRADE_VERSION);
+
c.createClusterVersion(stackId,
c.getDesiredStackVersion().getStackVersion(), "admin",
RepositoryVersionState.INSTALLING);
@@ -1258,7 +1248,7 @@ public class UpgradeHelperTest {
clusters.addHost(hostName);
Host host = clusters.getHost(hostName);
- Map<String, String> hostAttributes = new HashMap<String, String>();
+ Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
@@ -1312,7 +1302,7 @@ public class UpgradeHelperTest {
sc.addServiceComponentHost("h3");
// set some desired configs
- Map<String, String> hiveConfigs = new HashMap<String, String>();
+ Map<String, String> hiveConfigs = new HashMap<>();
hiveConfigs.put("hive.server2.transport.mode", "binary");
hiveConfigs.put("hive.server2.thrift.port", "10001");
@@ -1404,9 +1394,8 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
@@ -1476,7 +1465,7 @@ public class UpgradeHelperTest {
clusters.addHost(hostName);
Host host = clusters.getHost(hostName);
- Map<String, String> hostAttributes = new HashMap<String, String>();
+ Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
@@ -1505,9 +1494,8 @@ public class UpgradeHelperTest {
expect(m_masterHostResolver.getCluster()).andReturn(c).anyTimes();
replay(m_masterHostResolver);
- UpgradeContext context = new UpgradeContext(c, UpgradeType.ROLLING, Direction.DOWNGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(DOWNGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(c, UpgradeType.ROLLING,
+ Direction.DOWNGRADE, DOWNGRADE_VERSION, new HashMap<String, Object>());
context.setResolver(m_masterHostResolver);
Map<String, UpgradePack> upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1");
@@ -1557,7 +1545,7 @@ public class UpgradeHelperTest {
clusters.addHost(hostName);
Host host = clusters.getHost(hostName);
- Map<String, String> hostAttributes = new HashMap<String, String>();
+ Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
@@ -1622,7 +1610,7 @@ public class UpgradeHelperTest {
clusters.addHost(hostName);
Host host = clusters.getHost(hostName);
- Map<String, String> hostAttributes = new HashMap<String, String>();
+ Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
@@ -1688,7 +1676,7 @@ public class UpgradeHelperTest {
clusters.addHost(hostName);
Host host = clusters.getHost(hostName);
- Map<String, String> hostAttributes = new HashMap<String, String>();
+ Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
@@ -1755,9 +1743,9 @@ public class UpgradeHelperTest {
assertEquals(upgradeType, upgradePack.getType());
// get an upgrade
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_21, HDP_21);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
+
context.setResolver(m_masterHostResolver);
context.setSupportedServices(Collections.singleton("ZOOKEEPER"));
@@ -1808,7 +1796,7 @@ public class UpgradeHelperTest {
clusters.addHost(hostName);
Host host = clusters.getHost(hostName);
- Map<String, String> hostAttributes = new HashMap<String, String>();
+ Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
@@ -1858,9 +1846,8 @@ public class UpgradeHelperTest {
MasterHostResolver resolver = new MasterHostResolver(m_configHelper, c);
- UpgradeContext context = new UpgradeContext(c, UpgradeType.NON_ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(stackId, stackId2);
- context.setVersion("2.2.0");
+ UpgradeContext context = m_upgradeContextFactory.create(c, UpgradeType.NON_ROLLING,
+ Direction.UPGRADE, "2.2.0", new HashMap<String, Object>());
context.setResolver(resolver);
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgradePack, context);
@@ -1871,9 +1858,8 @@ public class UpgradeHelperTest {
sch2.setVersion("2.1.1");
resolver = new MasterHostResolver(m_configHelper, c, "2.1.1");
- context = new UpgradeContext(c, UpgradeType.NON_ROLLING, Direction.DOWNGRADE, null);
- context.setSourceAndTargetStacks(stackId2, stackId);
- context.setVersion("2.1.1");
+ context = m_upgradeContextFactory.create(c, UpgradeType.NON_ROLLING, Direction.DOWNGRADE,
+ "2.1.1", new HashMap<String, Object>());
context.setResolver(resolver);
groups = m_upgradeHelper.createSequence(upgradePack, context);
@@ -1914,7 +1900,7 @@ public class UpgradeHelperTest {
clusters.addHost(hostName);
Host host = clusters.getHost(hostName);
- Map<String, String> hostAttributes = new HashMap<String, String>();
+ Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
@@ -1960,11 +1946,9 @@ public class UpgradeHelperTest {
MasterHostResolver resolver = new MasterHostResolver(m_configHelper, c);
UpgradeContext context = m_upgradeContextFactory.create(c, UpgradeType.HOST_ORDERED,
- Direction.UPGRADE, new HashMap<String, Object>());
+ Direction.UPGRADE, "2.2.0", new HashMap<String, Object>());
context.setResolver(resolver);
- context.setSourceAndTargetStacks(stackId, stackId2);
- context.setVersion("2.2.0");
List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgradePack, context);
assertEquals(1, groups.size());
@@ -2003,11 +1987,9 @@ public class UpgradeHelperTest {
resolver = new MasterHostResolver(m_configHelper, c, "2.1.1");
m_upgradeContextFactory.create(c, UpgradeType.HOST_ORDERED, Direction.DOWNGRADE,
- new HashMap<String, Object>());
+ "2.1.1", new HashMap<String, Object>());
context.setResolver(resolver);
- context.setSourceAndTargetStacks(stackId2, stackId);
- context.setVersion("2.1.1");
groups = m_upgradeHelper.createSequence(upgradePack, context);
assertEquals(1, groups.size());
@@ -2020,11 +2002,9 @@ public class UpgradeHelperTest {
resolver = new MasterHostResolver(m_configHelper, c, "2.1.1");
m_upgradeContextFactory.create(c, UpgradeType.HOST_ORDERED, Direction.DOWNGRADE,
- new HashMap<String, Object>());
+ "2.1.1", new HashMap<String, Object>());
context.setResolver(resolver);
- context.setSourceAndTargetStacks(stackId2, stackId);
- context.setVersion("2.1.1");
groups = m_upgradeHelper.createSequence(upgradePack, context);
assertEquals(1, groups.size());
@@ -2046,9 +2026,9 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
- UpgradeContext context = new UpgradeContext(cluster, UpgradeType.ROLLING, Direction.UPGRADE, null);
- context.setSourceAndTargetStacks(HDP_22, HDP_22);
- context.setVersion(UPGRADE_VERSION);
+ UpgradeContext context = m_upgradeContextFactory.create(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, UPGRADE_VERSION, new HashMap<String, Object>());
+
context.setResolver(m_masterHostResolver);
// initially, no conditions should be met
@@ -2056,7 +2036,7 @@ public class UpgradeHelperTest {
assertEquals(0, groups.size());
// set the configuration property and try again
- Map<String, String> fooConfigs = new HashMap<String, String>();
+ Map<String, String> fooConfigs = new HashMap<>();
fooConfigs.put("foo-property", "foo-value");
ConfigurationRequest configurationRequest = new ConfigurationRequest();
configurationRequest.setClusterName(cluster.getClusterName());
@@ -2124,6 +2104,7 @@ public class UpgradeHelperTest {
@Override
public void configure(Binder binder) {
+ binder.install(new FactoryModuleBuilder().build(UpgradeContextFactory.class));
binder.bind(ConfigHelper.class).toInstance(m_configHelper);
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java
index d01249d..9d339e2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java
@@ -59,6 +59,7 @@ import org.apache.ambari.server.state.ServiceComponentFactory;
import org.apache.ambari.server.state.ServiceComponentHostFactory;
import org.apache.ambari.server.state.ServiceFactory;
import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.UpgradeContextFactory;
import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
import org.apache.ambari.server.state.stack.OsFamily;
@@ -249,6 +250,7 @@ public class ClusterEffectiveVersionTest extends EasyMockSupport {
*/
@Override
public void configure(Binder binder) {
+ binder.bind(UpgradeContextFactory.class).toInstance(EasyMock.createNiceMock(UpgradeContextFactory.class));
binder.bind(Clusters.class).toInstance(EasyMock.createNiceMock(Clusters.class));
binder.bind(OsFamily.class).toInstance(EasyMock.createNiceMock(OsFamily.class));
binder.bind(DBAccessor.class).toInstance(EasyMock.createNiceMock(DBAccessor.class));
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
index f7f8325..b9e27be 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
@@ -19,22 +19,29 @@ package org.apache.ambari.server.state.stack.upgrade;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.serveraction.upgrades.AutoSkipFailedSummaryAction;
import org.apache.ambari.server.stack.HostsType;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.UpgradeContext;
import org.apache.ambari.server.state.stack.UpgradePack.ProcessingComponent;
import org.easymock.EasyMock;
+import org.easymock.EasyMockSupport;
import org.junit.Assert;
import org.junit.Test;
/**
* Tests the {@link StageWrapperBuilder}.
*/
-public class StageWrapperBuilderTest {
+public class StageWrapperBuilderTest extends EasyMockSupport {
+ private static final StackId HDP_21 = new StackId("HDP-2.1.1");
/**
* Tests that the various build methods of a builder are invoked in the
@@ -44,7 +51,21 @@ public class StageWrapperBuilderTest {
*/
@Test
public void testBuildOrder() throws Exception {
- UpgradeContext upgradeContext = new UpgradeContext(null, UpgradeType.ROLLING, Direction.UPGRADE, null);
+ Cluster cluster = createNiceMock(Cluster.class);
+ EasyMock.expect(cluster.getCurrentStackVersion()).andReturn(HDP_21).atLeastOnce();
+ EasyMock.expect(cluster.getDesiredStackVersion()).andReturn(HDP_21).anyTimes();
+
+ RepositoryVersionEntity repoVersionEntity = createNiceMock(RepositoryVersionEntity.class);
+ EasyMock.expect(repoVersionEntity.getStackId()).andReturn(HDP_21).anyTimes();
+
+ RepositoryVersionDAO repoVersionDAO = createNiceMock(RepositoryVersionDAO.class);
+ EasyMock.expect(repoVersionDAO.findByStackNameAndVersion(EasyMock.anyString(),
+ EasyMock.anyString())).andReturn(repoVersionEntity).anyTimes();
+
+ replayAll();
+
+ UpgradeContext upgradeContext = new UpgradeContext(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, HDP_21.toString(), new HashMap<String, Object>(), repoVersionDAO);
MockStageWrapperBuilder builder = new MockStageWrapperBuilder(null);
List<StageWrapper> stageWrappers = builder.build(upgradeContext);
@@ -56,6 +77,8 @@ public class StageWrapperBuilderTest {
// nothing happened, so this should be empty
Assert.assertTrue(stageWrappers.isEmpty());
+
+ verifyAll();
}
/**
@@ -66,7 +89,22 @@ public class StageWrapperBuilderTest {
*/
@Test
public void testAutoSkipCheckInserted() throws Exception {
- UpgradeContext upgradeContext = new UpgradeContext(null, UpgradeType.ROLLING, Direction.UPGRADE, null);
+ Cluster cluster = createNiceMock(Cluster.class);
+ EasyMock.expect(cluster.getCurrentStackVersion()).andReturn(HDP_21).atLeastOnce();
+ EasyMock.expect(cluster.getDesiredStackVersion()).andReturn(HDP_21).anyTimes();
+
+ RepositoryVersionEntity repoVersionEntity = createNiceMock(RepositoryVersionEntity.class);
+ EasyMock.expect(repoVersionEntity.getStackId()).andReturn(HDP_21).anyTimes();
+
+ RepositoryVersionDAO repoVersionDAO = createNiceMock(RepositoryVersionDAO.class);
+ EasyMock.expect(repoVersionDAO.findByStackNameAndVersion(EasyMock.anyString(),
+ EasyMock.anyString())).andReturn(repoVersionEntity).anyTimes();
+
+ replayAll();
+
+ UpgradeContext upgradeContext = new UpgradeContext(cluster, UpgradeType.ROLLING,
+ Direction.UPGRADE, HDP_21.toString(), new HashMap<String, Object>(), repoVersionDAO);
+
upgradeContext.setAutoSkipComponentFailures(true);
upgradeContext.setAutoSkipServiceCheckFailures(true);
@@ -89,6 +127,8 @@ public class StageWrapperBuilderTest {
ServerActionTask task = (ServerActionTask)(skipSummaryWrapper.getTasks().get(0).getTasks().get(0));
Assert.assertEquals(AutoSkipFailedSummaryAction.class.getName(), task.implClass);
+
+ verifyAll();
}
/**
[2/3] ambari git commit: AMBARI-19617 - Restarting Some Components
During a Suspended Upgrade Fails Due To Missing Upgrade Parameters
(jonathanhurley)
Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index 1d51b0d..71fb5d9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -17,7 +17,10 @@
*/
package org.apache.ambari.server.state;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.VERSION;
+
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -27,7 +30,11 @@ import java.util.Set;
import org.apache.ambari.annotations.Experimental;
import org.apache.ambari.annotations.ExperimentalFeature;
import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
import org.apache.ambari.server.stack.MasterHostResolver;
import org.apache.ambari.server.stageplanner.RoleGraphFactory;
import org.apache.ambari.server.state.stack.UpgradePack;
@@ -36,14 +43,42 @@ import org.apache.ambari.server.state.stack.upgrade.Grouping;
import org.apache.ambari.server.state.stack.upgrade.UpgradeScope;
import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+import com.google.gson.Gson;
+import com.google.gson.JsonElement;
import com.google.inject.Inject;
import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
/**
* Used to hold various helper objects required to process an upgrade pack.
*/
public class UpgradeContext {
+ public static final String COMMAND_PARAM_VERSION = VERSION;
+ public static final String COMMAND_PARAM_CLUSTER_NAME = "clusterName";
+ public static final String COMMAND_PARAM_DIRECTION = "upgrade_direction";
+ public static final String COMMAND_PARAM_UPGRADE_PACK = "upgrade_pack";
+ public static final String COMMAND_PARAM_REQUEST_ID = "request_id";
+
+ public static final String COMMAND_PARAM_UPGRADE_TYPE = "upgrade_type";
+ public static final String COMMAND_PARAM_TASKS = "tasks";
+ public static final String COMMAND_PARAM_STRUCT_OUT = "structured_out";
+ public static final String COMMAND_DOWNGRADE_FROM_VERSION = "downgrade_from_version";
+
+ /**
+ * The original "current" stack of the cluster before the upgrade started.
+ * This is the same regardless of whether the current direction is
+ * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
+ */
+ public static final String COMMAND_PARAM_ORIGINAL_STACK = "original_stack";
+
+ /**
+ * The target upgrade stack before the upgrade started. This is the same
+ * regardless of whether the current direction is {@link Direction#UPGRADE} or
+ * {@link Direction#DOWNGRADE}.
+ */
+ public static final String COMMAND_PARAM_TARGET_STACK = "target_stack";
+
/**
* The cluster that the upgrade is for.
*/
@@ -72,7 +107,7 @@ public class UpgradeContext {
/**
* The version being upgrade to or downgraded to.
*/
- private String m_version;
+ private final String m_version;
/**
* The original "current" stack of the cluster before the upgrade started.
@@ -98,9 +133,9 @@ public class UpgradeContext {
private MasterHostResolver m_resolver;
private AmbariMetaInfo m_metaInfo;
- private List<ServiceComponentHost> m_unhealthy = new ArrayList<ServiceComponentHost>();
- private Map<String, String> m_serviceNames = new HashMap<String, String>();
- private Map<String, String> m_componentNames = new HashMap<String, String>();
+ private List<ServiceComponentHost> m_unhealthy = new ArrayList<>();
+ private Map<String, String> m_serviceNames = new HashMap<>();
+ private Map<String, String> m_componentNames = new HashMap<>();
private String m_downgradeFromVersion = null;
/**
@@ -141,6 +176,17 @@ public class UpgradeContext {
private RoleGraphFactory m_roleGraphFactory;
/**
+ * Used to look up the repository version given a stack name and version.
+ */
+ final private RepositoryVersionDAO m_repoVersionDAO;
+
+ /**
+ * Used for serializing the upgrade type.
+ */
+ @Inject
+ private Gson m_gson;
+
+ /**
* Constructor.
*
* @param cluster
@@ -151,14 +197,57 @@ public class UpgradeContext {
* the direction for the upgrade
* @param upgradeRequestMap
* the original map of paramters used to create the upgrade
+ *
+ * @param repoVersionDAO
+ * the repository version DAO.
*/
- @Inject
+ @AssistedInject
public UpgradeContext(@Assisted Cluster cluster, @Assisted UpgradeType type,
- @Assisted Direction direction, @Assisted Map<String, Object> upgradeRequestMap) {
+ @Assisted Direction direction, @Assisted String version,
+ @Assisted Map<String, Object> upgradeRequestMap,
+ RepositoryVersionDAO repoVersionDAO) {
+ m_repoVersionDAO = repoVersionDAO;
m_cluster = cluster;
m_type = type;
m_direction = direction;
+ m_version = version;
m_upgradeRequestMap = upgradeRequestMap;
+
+ // sets the original/target stacks - requires direction and cluster
+ setSourceAndTargetStacks();
+ }
+
+ /**
+ * Constructor.
+ *
+ * @param cluster
+ * the cluster that the upgrade is for
+ * @param upgradeEntity
+ * the upgrade entity
+ * @param repoVersionDAO
+ * the repository version DAO.
+ */
+ @AssistedInject
+ public UpgradeContext(@Assisted Cluster cluster, @Assisted UpgradeEntity upgradeEntity,
+ RepositoryVersionDAO repoVersionDAO) {
+ m_repoVersionDAO = repoVersionDAO;
+
+ m_cluster = cluster;
+ m_type = upgradeEntity.getUpgradeType();
+ m_direction = upgradeEntity.getDirection();
+
+ m_version = upgradeEntity.getToVersion();
+
+ // sets the original/target stacks - requires direction and cluster
+ setSourceAndTargetStacks();
+
+ if (m_direction == Direction.DOWNGRADE) {
+ m_downgradeFromVersion = upgradeEntity.getFromVersion();
+ }
+
+ // since this constructor is initialized from an entity, then this map is
+ // not present
+ m_upgradeRequestMap = Collections.emptyMap();
}
/**
@@ -166,24 +255,38 @@ public class UpgradeContext {
* stack ID based on the already-set {@link UpgradeType} and
* {@link Direction}.
*
- * @param sourceStackId
- * the original "current" stack of the cluster before the upgrade
- * started. This is the same regardless of whether the current
- * direction is {@link Direction#UPGRADE} or
- * {@link Direction#DOWNGRADE} (not {@code null}).
- * @param targetStackId
- * The target upgrade stack before the upgrade started. This is the
- * same regardless of whether the current direction is
- * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE} (not
- * {@code null}).
- *
* @see #getEffectiveStackId()
*/
- public void setSourceAndTargetStacks(StackId sourceStackId, StackId targetStackId) {
+ private void setSourceAndTargetStacks() {
+ StackId sourceStackId = null;
+
+ // target stack will not always be what it is today - tagging as experimental
+ @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES)
+ StackId targetStackId = null;
+
+ switch (m_direction) {
+ case UPGRADE:
+ sourceStackId = m_cluster.getCurrentStackVersion();
+
+ RepositoryVersionEntity targetRepositoryVersion = m_repoVersionDAO.findByStackNameAndVersion(
+ sourceStackId.getStackName(), m_version);
+
+ // !!! TODO check the repo_version for patch-ness and restrict the
+ // context to those services that require it. Consult the version
+ // definition and add the service names to supportedServices
+ targetStackId = targetRepositoryVersion.getStackId();
+ break;
+ case DOWNGRADE:
+ sourceStackId = m_cluster.getCurrentStackVersion();
+ targetStackId = m_cluster.getDesiredStackVersion();
+ break;
+ }
+
m_originalStackId = sourceStackId;
switch (m_type) {
case ROLLING:
+ case HOST_ORDERED:
m_effectiveStackId = targetStackId;
break;
case NON_ROLLING:
@@ -244,14 +347,6 @@ public class UpgradeContext {
}
/**
- * @param version
- * the target version to upgrade to
- */
- public void setVersion(String version) {
- m_version = version;
- }
-
- /**
* @return the direction of the upgrade
*/
public Direction getDirection() {
@@ -521,4 +616,48 @@ public class UpgradeContext {
public HostRoleCommandFactory getHostRoleCommandFactory() {
return m_hrcFactory;
}
+
+ /**
+ * Gets a map initialized with parameters required for upgrades to work. The
+ * following properties are already set:
+ * <ul>
+ * <li>{@link #COMMAND_PARAM_CLUSTER_NAME}
+ * <li>{@link #COMMAND_PARAM_VERSION}
+ * <li>{@link #COMMAND_PARAM_DIRECTION}
+ * <li>{@link #COMMAND_PARAM_ORIGINAL_STACK}
+ * <li>{@link #COMMAND_PARAM_TARGET_STACK}
+ * <li>{@link #COMMAND_DOWNGRADE_FROM_VERSION}
+ * <li>{@link #COMMAND_PARAM_UPGRADE_TYPE}
+ * <li>{@link KeyNames#REFRESH_CONFIG_TAGS_BEFORE_EXECUTION} - necessary in
+ * order to have the commands contain the correct configurations. Otherwise,
+ * they will contain the configurations that were available at the time the
+ * command was created. For upgrades, this is problematic since the commands
+ * are all created ahead of time, but the upgrade may change configs as part
+ * of the upgrade pack.</li>
+ * </ul>
+ *
+ * @return the initialized parameter map.
+ */
+ public Map<String, String> getInitializedCommandParameters() {
+ Map<String, String> parameters = new HashMap<>();
+
+ parameters.put(COMMAND_PARAM_CLUSTER_NAME, m_cluster.getClusterName());
+ parameters.put(COMMAND_PARAM_VERSION, getVersion());
+ parameters.put(COMMAND_PARAM_DIRECTION, getDirection().name().toLowerCase());
+ parameters.put(COMMAND_PARAM_ORIGINAL_STACK, getOriginalStackId().getStackId());
+ parameters.put(COMMAND_PARAM_TARGET_STACK, getTargetStackId().getStackId());
+ parameters.put(COMMAND_DOWNGRADE_FROM_VERSION, getDowngradeFromVersion());
+
+ if (null != getType()) {
+ // use the serialized attributes of the enum to convert it to a string,
+ // but first we must convert it into an element so that we don't get a
+ // quoted string - using toString() actually returns a quoted string which
+ // is bad
+ JsonElement json = m_gson.toJsonTree(getType());
+ parameters.put(COMMAND_PARAM_UPGRADE_TYPE, json.getAsString());
+ }
+
+ parameters.put(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION, "true");
+ return parameters;
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContextFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContextFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContextFactory.java
index 4b988e8..4f15ee2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContextFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContextFactory.java
@@ -19,6 +19,7 @@ package org.apache.ambari.server.state;
import java.util.Map;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
import org.apache.ambari.server.state.stack.upgrade.Direction;
import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
@@ -32,16 +33,32 @@ public interface UpgradeContextFactory {
* Creates an {@link UpgradeContext} which is injected with dependencies.
*
* @param cluster
- * the cluster that the upgrade is for
+ * the cluster that the upgrade is for (not {@code null}).
* @param type
- * the type of upgrade, either rolling or non_rolling
+ * the type of upgrade, either rolling or non_rolling (not
+ * {@code null}).
* @param direction
* the direction for the upgrade
+ * @param version
+ * the version being upgraded to or downgraded from (not
+ * {@code null}).
* @param upgradeRequestMap
- * the original map of paramters used to create the upgrade
+ * the original map of parameters used to create the upgrade (not
+ * {@code null}).
*
* @return an initialized {@link UpgradeContext}.
*/
UpgradeContext create(Cluster cluster, UpgradeType type, Direction direction,
- Map<String, Object> upgradeRequestMap);
+ String version, Map<String, Object> upgradeRequestMap);
+
+ /**
+ * Creates an {@link UpgradeContext} which is injected with dependencies.
+ *
+ * @param cluster
+ * the cluster that the upgrade is for (not {@code null}).
+ * @param upgradeEntity
+ * the upgrade entity (not {@code null}).
+ * @return an initialized {@link UpgradeContext}.
+ */
+ UpgradeContext create(Cluster cluster, UpgradeEntity upgradeEntity);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 7b0b696..0381e38 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -52,6 +52,7 @@ import org.apache.ambari.server.ServiceComponentHostNotFoundException;
import org.apache.ambari.server.ServiceComponentNotFoundException;
import org.apache.ambari.server.ServiceNotFoundException;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.configuration.Configuration;
import org.apache.ambari.server.controller.AmbariSessionManager;
@@ -136,6 +137,8 @@ import org.apache.ambari.server.state.ServiceInfo;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.StackInfo;
import org.apache.ambari.server.state.State;
+import org.apache.ambari.server.state.UpgradeContext;
+import org.apache.ambari.server.state.UpgradeContextFactory;
import org.apache.ambari.server.state.configgroup.ConfigGroup;
import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
@@ -316,6 +319,15 @@ public class ClusterImpl implements Cluster {
private RoleCommandOrderProvider roleCommandOrderProvider;
/**
+ * Used to create instances of {@link UpgradeContext} with injected
+ * dependencies. The {@link UpgradeContext} is used to populate the command
+ * with upgrade information on the command/role maps if the upgrade is
+ * suspended.
+ */
+ @Inject
+ private UpgradeContextFactory upgradeContextFactory;
+
+ /**
* A simple cache for looking up {@code cluster-env} properties for a cluster.
* This map is changed whenever {{cluster-env}} is changed and we receive a
* {@link ClusterConfigChangedEvent}.
@@ -511,7 +523,7 @@ public class ClusterImpl implements Cluster {
@Override
public Map<Long, ConfigGroup> getConfigGroupsByHostname(String hostname)
throws AmbariException {
- Map<Long, ConfigGroup> configGroups = new HashMap<Long, ConfigGroup>();
+ Map<Long, ConfigGroup> configGroups = new HashMap<>();
for (Entry<Long, ConfigGroup> groupEntry : clusterConfigGroups.entrySet()) {
Long id = groupEntry.getKey();
@@ -788,15 +800,15 @@ public class ClusterImpl implements Cluster {
public List<ServiceComponentHost> getServiceComponentHosts(String hostname) {
List<ServiceComponentHost> serviceComponentHosts = serviceComponentHostsByHost.get(hostname);
if (null != serviceComponentHosts) {
- return new CopyOnWriteArrayList<ServiceComponentHost>(serviceComponentHosts);
+ return new CopyOnWriteArrayList<>(serviceComponentHosts);
}
- return new ArrayList<ServiceComponentHost>();
+ return new ArrayList<>();
}
@Override
public Map<String, Set<String>> getServiceComponentHostMap(Set<String> hostNames, Set<String> serviceNames) {
- Map<String, Set<String>> componentHostMap = new HashMap<String, Set<String>>();
+ Map<String, Set<String>> componentHostMap = new HashMap<>();
Collection<Host> hosts = getHosts();
@@ -816,7 +828,7 @@ public class ClusterImpl implements Cluster {
Set<String> componentHosts = componentHostMap.get(component);
if (componentHosts == null) {
- componentHosts = new HashSet<String>();
+ componentHosts = new HashSet<>();
componentHostMap.put(component, componentHosts);
}
@@ -833,7 +845,7 @@ public class ClusterImpl implements Cluster {
@Override
public List<ServiceComponentHost> getServiceComponentHosts(String serviceName, String componentName) {
- ArrayList<ServiceComponentHost> foundItems = new ArrayList<ServiceComponentHost>();
+ ArrayList<ServiceComponentHost> foundItems = new ArrayList<>();
ConcurrentMap<String, ConcurrentMap<String, ServiceComponentHost>> foundByService = serviceComponentHosts.get(
serviceName);
@@ -884,7 +896,7 @@ public class ClusterImpl implements Cluster {
@Override
public Map<String, Service> getServices() {
- return new HashMap<String, Service>(services);
+ return new HashMap<>(services);
}
@Override
@@ -1138,7 +1150,7 @@ public class ClusterImpl implements Cluster {
StackEntity repoVersionStackEntity = currentClusterVersion.getRepositoryVersion().getStack();
StackId repoVersionStackId = new StackId(repoVersionStackEntity);
- Map<String, HostVersionEntity> existingHostToHostVersionEntity = new HashMap<String, HostVersionEntity>();
+ Map<String, HostVersionEntity> existingHostToHostVersionEntity = new HashMap<>();
List<HostVersionEntity> existingHostVersionEntities = hostVersionDAO.findByClusterStackAndVersion(
getClusterName(), repoVersionStackId,
currentClusterVersion.getRepositoryVersion().getVersion());
@@ -1206,8 +1218,8 @@ public class ClusterImpl implements Cluster {
}
Map<String, Host> hosts = clusters.getHostsForCluster(getClusterName());
- Set<String> existingHostsWithClusterStackAndVersion = new HashSet<String>();
- HashMap<String, HostVersionEntity> existingHostStackVersions = new HashMap<String, HostVersionEntity>();
+ Set<String> existingHostsWithClusterStackAndVersion = new HashSet<>();
+ HashMap<String, HostVersionEntity> existingHostStackVersions = new HashMap<>();
clusterGlobalLock.writeLock().lock();
try {
@@ -1417,15 +1429,15 @@ public class ClusterImpl implements Cluster {
}
// Part 2, check for transitions.
- Set<String> hostsWithoutHostVersion = new HashSet<String>();
- Map<RepositoryVersionState, Set<String>> stateToHosts = new HashMap<RepositoryVersionState, Set<String>>();
+ Set<String> hostsWithoutHostVersion = new HashSet<>();
+ Map<RepositoryVersionState, Set<String>> stateToHosts = new HashMap<>();
//hack until better hostversion integration into in-memory cluster structure
List<HostVersionEntity> hostVersionEntities =
hostVersionDAO.findByClusterStackAndVersion(getClusterName(), stackId, version);
- Set<String> hostsWithState = new HashSet<String>();
+ Set<String> hostsWithState = new HashSet<>();
Set<String> hostsInMaintenanceState = new HashSet<>();
for (HostVersionEntity hostVersionEntity : hostVersionEntities) {
String hostname = hostVersionEntity.getHostEntity().getHostName();
@@ -1440,7 +1452,7 @@ public class ClusterImpl implements Cluster {
if (stateToHosts.containsKey(hostState)) {
stateToHosts.get(hostState).add(hostname);
} else {
- Set<String> hostsInState = new HashSet<String>();
+ Set<String> hostsInState = new HashSet<>();
hostsInState.add(hostname);
stateToHosts.put(hostState, hostsInState);
}
@@ -1655,7 +1667,7 @@ public class ClusterImpl implements Cluster {
@Transactional
public void transitionClusterVersion(StackId stackId, String version,
RepositoryVersionState state) throws AmbariException {
- Set<RepositoryVersionState> allowedStates = new HashSet<RepositoryVersionState>();
+ Set<RepositoryVersionState> allowedStates = new HashSet<>();
clusterGlobalLock.writeLock().lock();
try {
ClusterEntity clusterEntity = getClusterEntity();
@@ -1924,7 +1936,7 @@ public class ClusterImpl implements Cluster {
public Collection<Config> getAllConfigs() {
clusterGlobalLock.readLock().lock();
try {
- List<Config> list = new ArrayList<Config>();
+ List<Config> list = new ArrayList<>();
for (Entry<String, ConcurrentMap<String, Config>> entry : allConfigs.entrySet()) {
for (Config config : entry.getValue().values()) {
list.add(config);
@@ -2211,14 +2223,14 @@ public class ClusterImpl implements Cluster {
}
// TODO AMBARI-10679, need efficient caching from hostId to hostName...
- Map<Long, String> hostIdToName = new HashMap<Long, String>();
+ Map<Long, String> hostIdToName = new HashMap<>();
if (!map.isEmpty()) {
Map<String, List<HostConfigMapping>> hostMappingsByType =
hostConfigMappingDAO.findSelectedHostsByTypes(clusterId, types);
for (Entry<String, Set<DesiredConfig>> entry : map.entrySet()) {
- List<DesiredConfig.HostOverride> hostOverrides = new ArrayList<DesiredConfig.HostOverride>();
+ List<DesiredConfig.HostOverride> hostOverrides = new ArrayList<>();
for (HostConfigMapping mappingEntity : hostMappingsByType.get(entry.getKey())) {
if (!hostIdToName.containsKey(mappingEntity.getHostId())) {
@@ -2258,7 +2270,7 @@ public class ClusterImpl implements Cluster {
if (configGroup != null) {
serviceConfigEntity.setGroupId(configGroup.getId());
Collection<Config> configs = configGroup.getConfigurations().values();
- List<ClusterConfigEntity> configEntities = new ArrayList<ClusterConfigEntity>(
+ List<ClusterConfigEntity> configEntities = new ArrayList<>(
configs.size());
for (Config config : configs) {
configEntities.add(
@@ -2284,7 +2296,7 @@ public class ClusterImpl implements Cluster {
serviceConfigDAO.create(serviceConfigEntity);
if (configGroup != null) {
- serviceConfigEntity.setHostIds(new ArrayList<Long>(configGroup.getHosts().keySet()));
+ serviceConfigEntity.setHostIds(new ArrayList<>(configGroup.getHosts().keySet()));
serviceConfigEntity = serviceConfigDAO.merge(serviceConfigEntity);
}
} finally {
@@ -2360,7 +2372,7 @@ public class ClusterImpl implements Cluster {
public Map<String, Collection<ServiceConfigVersionResponse>> getActiveServiceConfigVersions() {
clusterGlobalLock.readLock().lock();
try {
- Map<String, Collection<ServiceConfigVersionResponse>> map = new HashMap<String, Collection<ServiceConfigVersionResponse>>();
+ Map<String, Collection<ServiceConfigVersionResponse>> map = new HashMap<>();
Set<ServiceConfigVersionResponse> responses = getActiveServiceConfigVersionSet();
for (ServiceConfigVersionResponse response : responses) {
@@ -2380,7 +2392,7 @@ public class ClusterImpl implements Cluster {
public List<ServiceConfigVersionResponse> getServiceConfigVersions() {
clusterGlobalLock.readLock().lock();
try {
- List<ServiceConfigVersionResponse> serviceConfigVersionResponses = new ArrayList<ServiceConfigVersionResponse>();
+ List<ServiceConfigVersionResponse> serviceConfigVersionResponses = new ArrayList<>();
List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getServiceConfigs(getClusterId());
@@ -2436,7 +2448,7 @@ public class ClusterImpl implements Cluster {
}
private Set<ServiceConfigVersionResponse> getActiveServiceConfigVersionSet() {
- Set<ServiceConfigVersionResponse> responses = new HashSet<ServiceConfigVersionResponse>();
+ Set<ServiceConfigVersionResponse> responses = new HashSet<>();
List<ServiceConfigEntity> activeServiceConfigVersions = getActiveServiceConfigVersionEntities();
for (ServiceConfigEntity lastServiceConfig : activeServiceConfigVersions) {
@@ -2449,7 +2461,7 @@ public class ClusterImpl implements Cluster {
private List<ServiceConfigEntity> getActiveServiceConfigVersionEntities() {
- List<ServiceConfigEntity> activeServiceConfigVersions = new ArrayList<ServiceConfigEntity>();
+ List<ServiceConfigEntity> activeServiceConfigVersions = new ArrayList<>();
//for services
activeServiceConfigVersions.addAll(serviceConfigDAO.getLastServiceConfigs(getClusterId()));
//for config groups
@@ -2465,8 +2477,8 @@ public class ClusterImpl implements Cluster {
public List<ServiceConfigVersionResponse> getActiveServiceConfigVersionResponse(String serviceName) {
clusterGlobalLock.readLock().lock();
try {
- List<ServiceConfigEntity> activeServiceConfigVersionEntities = new ArrayList<ServiceConfigEntity>();
- List<ServiceConfigVersionResponse> activeServiceConfigVersionResponses = new ArrayList<ServiceConfigVersionResponse>();
+ List<ServiceConfigEntity> activeServiceConfigVersionEntities = new ArrayList<>();
+ List<ServiceConfigVersionResponse> activeServiceConfigVersionResponses = new ArrayList<>();
activeServiceConfigVersionEntities.addAll(serviceConfigDAO.getLastServiceConfigsForService(getClusterId(), serviceName));
for (ServiceConfigEntity serviceConfigEntity : activeServiceConfigVersionEntities) {
ServiceConfigVersionResponse serviceConfigVersionResponse = getServiceConfigVersionResponseWithConfig(convertToServiceConfigVersionResponse(serviceConfigEntity), serviceConfigEntity);
@@ -2560,14 +2572,14 @@ public class ClusterImpl implements Cluster {
Long configGroupId = serviceConfigEntity.getGroupId();
ConfigGroup configGroup = clusterConfigGroups.get(configGroupId);
if (configGroup != null) {
- Map<String, Config> groupDesiredConfigs = new HashMap<String, Config>();
+ Map<String, Config> groupDesiredConfigs = new HashMap<>();
for (ClusterConfigEntity entity : serviceConfigEntity.getClusterConfigEntities()) {
Config config = allConfigs.get(entity.getType()).get(entity.getTag());
groupDesiredConfigs.put(config.getType(), config);
}
configGroup.setConfigurations(groupDesiredConfigs);
- Map<Long, Host> groupDesiredHosts = new HashMap<Long, Host>();
+ Map<Long, Host> groupDesiredHosts = new HashMap<>();
if (serviceConfigEntity.getHostIds() != null) {
for (Long hostId : serviceConfigEntity.getHostIds()) {
Host host = clusters.getHostById(hostId);
@@ -2716,7 +2728,7 @@ public class ClusterImpl implements Cluster {
Set<HostConfigMapping> mappingEntities =
hostConfigMappingDAO.findSelectedByHosts(hostIds);
- Map<Long, Map<String, DesiredConfig>> desiredConfigsByHost = new HashMap<Long, Map<String, DesiredConfig>>();
+ Map<Long, Map<String, DesiredConfig>> desiredConfigsByHost = new HashMap<>();
for (Long hostId : hostIds) {
desiredConfigsByHost.put(hostId, new HashMap<String, DesiredConfig>());
@@ -2782,7 +2794,7 @@ public class ClusterImpl implements Cluster {
@Transactional
protected Map<ServiceComponentHostEvent, String> processServiceComponentHostEventsInSingleTransaction(
ListMultimap<String, ServiceComponentHostEvent> eventMap) {
- Map<ServiceComponentHostEvent, String> failedEvents = new HashMap<ServiceComponentHostEvent, String>();
+ Map<ServiceComponentHostEvent, String> failedEvents = new HashMap<>();
for (Entry<String, ServiceComponentHostEvent> entry : eventMap.entries()) {
String serviceName = entry.getKey();
@@ -3016,7 +3028,7 @@ public class ClusterImpl implements Cluster {
@Override
public void addSessionAttributes(Map<String, Object> attributes) {
if (attributes != null && !attributes.isEmpty()) {
- Map<String, Object> sessionAttributes = new HashMap<String, Object>(getSessionAttributes());
+ Map<String, Object> sessionAttributes = new HashMap<>(getSessionAttributes());
sessionAttributes.putAll(attributes);
setSessionAttributes(attributes);
}
@@ -3025,7 +3037,7 @@ public class ClusterImpl implements Cluster {
@Override
public void setSessionAttribute(String key, Object value){
if (key != null && !key.isEmpty()) {
- Map<String, Object> sessionAttributes = new HashMap<String, Object>(getSessionAttributes());
+ Map<String, Object> sessionAttributes = new HashMap<>(getSessionAttributes());
sessionAttributes.put(key, value);
setSessionAttributes(sessionAttributes);
}
@@ -3034,7 +3046,7 @@ public class ClusterImpl implements Cluster {
@Override
public void removeSessionAttribute(String key) {
if (key != null && !key.isEmpty()) {
- Map<String, Object> sessionAttributes = new HashMap<String, Object>(getSessionAttributes());
+ Map<String, Object> sessionAttributes = new HashMap<>(getSessionAttributes());
sessionAttributes.remove(key);
setSessionAttributes(sessionAttributes);
}
@@ -3174,7 +3186,7 @@ public class ClusterImpl implements Cluster {
List<ClusterConfigMappingEntity> clusterConfigMappingsForStack = clusterDAO.getClusterConfigMappingsByStack(
clusterId, stackId);
- Map<String, ClusterConfigMappingEntity> latestMappingsByType = new HashMap<String, ClusterConfigMappingEntity>();
+ Map<String, ClusterConfigMappingEntity> latestMappingsByType = new HashMap<>();
for (ClusterConfigMappingEntity mapping : clusterConfigMappingsForStack) {
String type = mapping.getType();
@@ -3227,7 +3239,7 @@ public class ClusterImpl implements Cluster {
// this will keep track of cluster config mappings that need removal
// since there is no relationship between configs and their mappings, we
// have to do it manually
- List<ClusterConfigEntity> removedClusterConfigs = new ArrayList<ClusterConfigEntity>(50);
+ List<ClusterConfigEntity> removedClusterConfigs = new ArrayList<>(50);
Collection<ClusterConfigEntity> clusterConfigEntities = clusterEntity.getClusterConfigEntities();
List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getAllServiceConfigsForClusterAndStack(
@@ -3489,4 +3501,28 @@ public class ClusterImpl implements Cluster {
public RoleCommandOrder getRoleCommandOrder() {
return roleCommandOrderProvider.getRoleCommandOrder(this);
}
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void addSuspendedUpgradeParameters(Map<String, String> commandParams,
+ Map<String, String> roleParams) {
+
+ // build some command params from the upgrade, including direction,
+ // type, version, etc
+ UpgradeEntity suspendedUpgrade = getUpgradeInProgress();
+ if( null == suspendedUpgrade ){
+ LOG.warn(
+ "An upgrade is not currently suspended. The command and role parameters will not be modified.");
+
+ return;
+ }
+
+ UpgradeContext upgradeContext = upgradeContextFactory.create(this, suspendedUpgrade);
+ commandParams.putAll(upgradeContext.getInitializedCommandParameters());
+
+ // suspended goes in role params
+ roleParams.put(KeyNames.UPGRADE_SUSPENDED, Boolean.TRUE.toString().toLowerCase());
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
index 17b1e27..8ae192b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
@@ -44,6 +44,7 @@ import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
import org.apache.ambari.server.security.SecurityHelper;
import org.apache.ambari.server.security.SecurityHelperImpl;
import org.apache.ambari.server.stack.StackManagerFactory;
+import org.apache.ambari.server.stageplanner.RoleGraphFactory;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
@@ -58,6 +59,7 @@ import org.apache.ambari.server.state.ServiceComponentHostFactory;
import org.apache.ambari.server.state.ServiceComponentImpl;
import org.apache.ambari.server.state.ServiceFactory;
import org.apache.ambari.server.state.ServiceImpl;
+import org.apache.ambari.server.state.UpgradeContextFactory;
import org.apache.ambari.server.state.cluster.ClusterFactory;
import org.apache.ambari.server.state.cluster.ClusterImpl;
import org.apache.ambari.server.state.configgroup.ConfigGroup;
@@ -322,6 +324,8 @@ public class AgentResourceTest extends RandomPortJerseyTest {
}
private void installDependencies() {
+ install(new FactoryModuleBuilder().build(UpgradeContextFactory.class));
+ install(new FactoryModuleBuilder().build(RoleGraphFactory.class));
install(new FactoryModuleBuilder().implement(
Cluster.class, ClusterImpl.class).build(ClusterFactory.class));
install(new FactoryModuleBuilder().implement(
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
index 9693f98..4e5d055 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
@@ -110,6 +110,7 @@ import org.apache.ambari.server.state.ServiceComponent;
import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.State;
+import org.apache.ambari.server.state.UpgradeContextFactory;
import org.apache.ambari.server.state.cluster.ClusterFactory;
import org.apache.ambari.server.state.host.HostFactory;
import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
@@ -221,6 +222,9 @@ public class KerberosHelperTest extends EasyMockSupport {
@Override
protected void configure() {
+ install(new FactoryModuleBuilder().build(UpgradeContextFactory.class));
+ install(new FactoryModuleBuilder().build(RoleGraphFactory.class));
+
bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
bind(ClusterFactory.class).toInstance(createNiceMock(ClusterFactory.class));
@@ -234,7 +238,6 @@ public class KerberosHelperTest extends EasyMockSupport {
bind(RequestFactory.class).toInstance(createNiceMock(RequestFactory.class));
bind(StageFactory.class).toInstance(createNiceMock(StageFactory.class));
bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
- install(new FactoryModuleBuilder().build(RoleGraphFactory.class));
bind(ConfigHelper.class).toInstance(createNiceMock(ConfigHelper.class));
bind(KerberosOperationHandlerFactory.class).toInstance(kerberosOperationHandlerFactory);
bind(ClusterController.class).toInstance(clusterController);
@@ -1583,7 +1586,7 @@ public class KerberosHelperTest extends EasyMockSupport {
hostInvalid = null;
}
- Map<String, ServiceComponentHost> map = new HashMap<String, ServiceComponentHost>();
+ Map<String, ServiceComponentHost> map = new HashMap<>();
final ServiceComponent serviceComponentKerberosClient = createNiceMock(ServiceComponent.class);
map.put("host1", schKerberosClient);
expect(serviceComponentKerberosClient.getName()).andReturn(Role.KERBEROS_CLIENT.name()).anyTimes();
@@ -1922,7 +1925,7 @@ public class KerberosHelperTest extends EasyMockSupport {
identityDescriptor2
)).times(1);
expect(serviceDescriptor1.getComponent("COMPONENT1")).andReturn(componentDescriptor1).times(1);
- expect(serviceDescriptor1.getAuthToLocalProperties()).andReturn(new HashSet<String>(Arrays.asList(
+ expect(serviceDescriptor1.getAuthToLocalProperties()).andReturn(new HashSet<>(Arrays.asList(
"default",
"explicit_multiple_lines|new_lines",
"explicit_multiple_lines_escaped|new_lines_escaped",
@@ -1941,7 +1944,7 @@ public class KerberosHelperTest extends EasyMockSupport {
Map<String, Set<String>> installedServices = Collections.singletonMap("SERVICE1", Collections.singleton("COMPONENT1"));
- Map<String, Map<String, String>> kerberosConfigurations = new HashMap<String, Map<String, String>>();
+ Map<String, Map<String, String>> kerberosConfigurations = new HashMap<>();
replayAll();
@@ -2191,7 +2194,7 @@ public class KerberosHelperTest extends EasyMockSupport {
Collections.<KerberosIdentityDescriptor>emptyList());
expect(serviceDescriptor3.getComponent("COMPONENT3A")).andReturn(componentDescriptor3a).times(4);
- final Map<String, String> kerberosDescriptorProperties = new HashMap<String, String>();
+ final Map<String, String> kerberosDescriptorProperties = new HashMap<>();
kerberosDescriptorProperties.put("realm", "${kerberos-env/realm}");
final KerberosDescriptor kerberosDescriptor = createMock(KerberosDescriptor.class);
@@ -2218,7 +2221,7 @@ public class KerberosHelperTest extends EasyMockSupport {
RecommendationResponse.BlueprintConfigurations service1SiteRecommendation = createNiceMock(RecommendationResponse.BlueprintConfigurations.class);
expect(service1SiteRecommendation.getProperties()).andReturn(Collections.singletonMap("component1b.property", "replaced value"));
- Map<String, RecommendationResponse.BlueprintConfigurations> configurations = new HashMap<String, RecommendationResponse.BlueprintConfigurations>();
+ Map<String, RecommendationResponse.BlueprintConfigurations> configurations = new HashMap<>();
configurations.put("core-site", coreSiteRecommendation);
configurations.put("new-type", newTypeRecommendation);
configurations.put("type1", type1Recommendation);
@@ -2285,16 +2288,16 @@ public class KerberosHelperTest extends EasyMockSupport {
}
});
- Map<String, Service> services = new HashMap<String, Service>();
+ Map<String, Service> services = new HashMap<>();
services.put("SERVICE1", service1);
services.put("SERVICE2", service2);
services.put("SERVICE3", service3);
- Map<String, Set<String>> serviceComponentHostMap = new HashMap<String, Set<String>>();
+ Map<String, Set<String>> serviceComponentHostMap = new HashMap<>();
serviceComponentHostMap.put("COMPONENT1A", Collections.singleton("hostA"));
- serviceComponentHostMap.put("COMPONENT1B", new HashSet<String>(Arrays.asList("hostB", "hostC")));
+ serviceComponentHostMap.put("COMPONENT1B", new HashSet<>(Arrays.asList("hostB", "hostC")));
serviceComponentHostMap.put("COMPONENT2A", Collections.singleton("hostA"));
- serviceComponentHostMap.put("COMPONENT2B", new HashSet<String>(Arrays.asList("hostB", "hostC")));
+ serviceComponentHostMap.put("COMPONENT2B", new HashSet<>(Arrays.asList("hostB", "hostC")));
serviceComponentHostMap.put("COMPONEN3A", Collections.singleton("hostA"));
final Cluster cluster = createMockCluster("c1", hosts, SecurityType.KERBEROS, krb5ConfConfig, kerberosEnvConfig);
@@ -2335,20 +2338,20 @@ public class KerberosHelperTest extends EasyMockSupport {
// Needed by infrastructure
injector.getInstance(AmbariMetaInfo.class).init();
- HashMap<String, Set<String>> installedServices1 = new HashMap<String, Set<String>>();
- installedServices1.put("SERVICE1", new HashSet<String>(Arrays.asList("COMPONENT1A", "COMPONENT1B")));
- installedServices1.put("SERVICE2", new HashSet<String>(Arrays.asList("COMPONENT2A", "COMPONENT2B")));
+ HashMap<String, Set<String>> installedServices1 = new HashMap<>();
+ installedServices1.put("SERVICE1", new HashSet<>(Arrays.asList("COMPONENT1A", "COMPONENT1B")));
+ installedServices1.put("SERVICE2", new HashSet<>(Arrays.asList("COMPONENT2A", "COMPONENT2B")));
installedServices1.put("SERVICE3", Collections.singleton("COMPONENT3A"));
Map<String, Map<String, String>> updates1 = kerberosHelper.getServiceConfigurationUpdates(
cluster, existingConfigurations, installedServices1, null, null, true, true);
- HashMap<String, Set<String>> installedServices2 = new HashMap<String, Set<String>>();
- installedServices2.put("SERVICE1", new HashSet<String>(Arrays.asList("COMPONENT1A", "COMPONENT1B")));
+ HashMap<String, Set<String>> installedServices2 = new HashMap<>();
+ installedServices2.put("SERVICE1", new HashSet<>(Arrays.asList("COMPONENT1A", "COMPONENT1B")));
installedServices2.put("SERVICE3", Collections.singleton("COMPONENT3A"));
- Map<String, Collection<String>> serviceFilter2 = new HashMap<String, Collection<String>>();
- serviceFilter2.put("SERVICE1", new HashSet<String>(Arrays.asList("COMPONENT1A", "COMPONENT1B")));
+ Map<String, Collection<String>> serviceFilter2 = new HashMap<>();
+ serviceFilter2.put("SERVICE1", new HashSet<>(Arrays.asList("COMPONENT1A", "COMPONENT1B")));
serviceFilter2.put("SERVICE3", Collections.singleton("COMPONENT3A"));
Map<String, Map<String, String>> updates2 = kerberosHelper.getServiceConfigurationUpdates(
@@ -2495,9 +2498,9 @@ public class KerberosHelperTest extends EasyMockSupport {
ambariServerPrincipalNameExpected = String.format("ambari-server-%s@%s", clusterName, realm);
}
- Map<String, String> propertiesKrb5Conf = new HashMap<String, String>();
+ Map<String, String> propertiesKrb5Conf = new HashMap<>();
- Map<String, String> propertiesKerberosEnv = new HashMap<String, String>();
+ Map<String, String> propertiesKerberosEnv = new HashMap<>();
propertiesKerberosEnv.put("realm", realm);
propertiesKerberosEnv.put("kdc_type", "mit-kdc");
propertiesKerberosEnv.put("password_length", "20");
@@ -2518,35 +2521,35 @@ public class KerberosHelperTest extends EasyMockSupport {
Host host2 = createMockHost("host3");
Host host3 = createMockHost("host2");
- Map<String, ServiceComponentHost> service1Component1HostMap = new HashMap<String, ServiceComponentHost>();
+ Map<String, ServiceComponentHost> service1Component1HostMap = new HashMap<>();
service1Component1HostMap.put("host1", createMockServiceComponentHost(State.INSTALLED));
- Map<String, ServiceComponentHost> service2Component1HostMap = new HashMap<String, ServiceComponentHost>();
+ Map<String, ServiceComponentHost> service2Component1HostMap = new HashMap<>();
service2Component1HostMap.put("host2", createMockServiceComponentHost(State.INSTALLED));
- Map<String, ServiceComponent> service1ComponentMap = new HashMap<String, ServiceComponent>();
+ Map<String, ServiceComponent> service1ComponentMap = new HashMap<>();
service1ComponentMap.put("COMPONENT11", createMockComponent("COMPONENT11", true, service1Component1HostMap));
- Map<String, ServiceComponent> service2ComponentMap = new HashMap<String, ServiceComponent>();
+ Map<String, ServiceComponent> service2ComponentMap = new HashMap<>();
service2ComponentMap.put("COMPONENT21", createMockComponent("COMPONENT21", true, service2Component1HostMap));
Service service1 = createMockService("SERVICE1", service1ComponentMap);
Service service2 = createMockService("SERVICE2", service2ComponentMap);
- Map<String, Service> servicesMap = new HashMap<String, Service>();
+ Map<String, Service> servicesMap = new HashMap<>();
servicesMap.put("SERVICE1", service1);
servicesMap.put("SERVICE2", service2);
Cluster cluster = createMockCluster(clusterName, Arrays.asList(host1, host2, host3), SecurityType.KERBEROS, configKrb5Conf, configKerberosEnv);
expect(cluster.getServices()).andReturn(servicesMap).times(1);
- Map<String, String> kerberosDescriptorProperties = new HashMap<String, String>();
+ Map<String, String> kerberosDescriptorProperties = new HashMap<>();
kerberosDescriptorProperties.put("additional_realms", "");
kerberosDescriptorProperties.put("keytab_dir", "/etc/security/keytabs");
kerberosDescriptorProperties.put("realm", "${kerberos-env/realm}");
kerberosDescriptorProperties.put("principal_suffix", "-${cluster_name|toLower()}");
- ArrayList<KerberosIdentityDescriptor> service1Component1Identities = new ArrayList<KerberosIdentityDescriptor>();
+ ArrayList<KerberosIdentityDescriptor> service1Component1Identities = new ArrayList<>();
service1Component1Identities.add(createMockIdentityDescriptor(
"s1c1_1.user",
createMockPrincipalDescriptor("s1c1_1@${realm}", KerberosPrincipalType.USER, "s1c1", null),
@@ -2558,10 +2561,10 @@ public class KerberosHelperTest extends EasyMockSupport {
createMockKeytabDescriptor("s1c1_1.service.keytab", null)
));
- HashMap<String, KerberosComponentDescriptor> service1ComponentDescriptorMap = new HashMap<String, KerberosComponentDescriptor>();
+ HashMap<String, KerberosComponentDescriptor> service1ComponentDescriptorMap = new HashMap<>();
service1ComponentDescriptorMap.put("COMPONENT11", createMockComponentDescriptor("COMPONENT11", service1Component1Identities, null));
- List<KerberosIdentityDescriptor> service1Identities = new ArrayList<KerberosIdentityDescriptor>();
+ List<KerberosIdentityDescriptor> service1Identities = new ArrayList<>();
service1Identities.add(createMockIdentityDescriptor(
"s1_1.user",
createMockPrincipalDescriptor("s1_1@${realm}", KerberosPrincipalType.USER, "s1", null),
@@ -2575,7 +2578,7 @@ public class KerberosHelperTest extends EasyMockSupport {
KerberosServiceDescriptor service1KerberosDescriptor = createMockServiceDescriptor("SERVICE1", service1ComponentDescriptorMap, service1Identities);
- ArrayList<KerberosIdentityDescriptor> service2Component1Identities = new ArrayList<KerberosIdentityDescriptor>();
+ ArrayList<KerberosIdentityDescriptor> service2Component1Identities = new ArrayList<>();
service2Component1Identities.add(createMockIdentityDescriptor(
"s2_1.user",
createMockPrincipalDescriptor("s2_1@${realm}", KerberosPrincipalType.USER, "s2", null),
@@ -2587,7 +2590,7 @@ public class KerberosHelperTest extends EasyMockSupport {
createMockKeytabDescriptor("s2c1_1.service.keytab", null)
));
- HashMap<String, KerberosComponentDescriptor> service2ComponentDescriptorMap = new HashMap<String, KerberosComponentDescriptor>();
+ HashMap<String, KerberosComponentDescriptor> service2ComponentDescriptorMap = new HashMap<>();
service2ComponentDescriptorMap.put("COMPONENT21", createMockComponentDescriptor("COMPONENT21", service2Component1Identities, null));
KerberosServiceDescriptor service2KerberosDescriptor = createMockServiceDescriptor("SERVICE2", service2ComponentDescriptorMap, null);
@@ -2600,7 +2603,7 @@ public class KerberosHelperTest extends EasyMockSupport {
if (createAmbariIdentities) {
String spnegoPrincipalNameExpected = String.format("HTTP/%s@%s", ambariServerHostname, realm);
- ArrayList<KerberosIdentityDescriptor> ambarServerComponent1Identities = new ArrayList<KerberosIdentityDescriptor>();
+ ArrayList<KerberosIdentityDescriptor> ambarServerComponent1Identities = new ArrayList<>();
ambarServerComponent1Identities.add(createMockIdentityDescriptor(
KerberosHelper.AMBARI_SERVER_KERBEROS_IDENTITY_NAME,
createMockPrincipalDescriptor(ambariServerPrincipalName, ambariServerPrincipalType, "ambari", null),
@@ -2613,7 +2616,7 @@ public class KerberosHelperTest extends EasyMockSupport {
KerberosComponentDescriptor ambariServerComponentKerberosDescriptor = createMockComponentDescriptor("AMBARI_SERVER", ambarServerComponent1Identities, null);
- HashMap<String, KerberosComponentDescriptor> ambariServerComponentDescriptorMap = new HashMap<String, KerberosComponentDescriptor>();
+ HashMap<String, KerberosComponentDescriptor> ambariServerComponentDescriptorMap = new HashMap<>();
ambariServerComponentDescriptorMap.put("AMBARI_SERVER", ambariServerComponentKerberosDescriptor);
KerberosServiceDescriptor ambariServiceKerberosDescriptor = createMockServiceDescriptor("AMBARI", ambariServerComponentDescriptorMap, null);
@@ -2637,7 +2640,7 @@ public class KerberosHelperTest extends EasyMockSupport {
setupKerberosDescriptor(kerberosDescriptor, 1);
- Map<String, Map<String, String>> existingConfigurations = new HashMap<String, Map<String, String>>();
+ Map<String, Map<String, String>> existingConfigurations = new HashMap<>();
existingConfigurations.put("kerberos-env", propertiesKerberosEnv);
Set<String> services = new HashSet<String>() {
@@ -2717,9 +2720,9 @@ public class KerberosHelperTest extends EasyMockSupport {
*/
@Test
public void testServiceWithoutComponents() throws Exception {
- Map<String, String> propertiesKrb5Conf = new HashMap<String, String>();
+ Map<String, String> propertiesKrb5Conf = new HashMap<>();
- Map<String, String> propertiesKerberosEnv = new HashMap<String, String>();
+ Map<String, String> propertiesKerberosEnv = new HashMap<>();
propertiesKerberosEnv.put("realm", "EXAMPLE.COM");
propertiesKerberosEnv.put("kdc_type", "mit-kdc");
propertiesKerberosEnv.put("create_ambari_principal", "false");
@@ -2733,29 +2736,29 @@ public class KerberosHelperTest extends EasyMockSupport {
// Create a Service (SERVICE1) with one Component (COMPONENT11)
Host host1 = createMockHost("host1");
- Map<String, ServiceComponentHost> service1Component1HostMap = new HashMap<String, ServiceComponentHost>();
+ Map<String, ServiceComponentHost> service1Component1HostMap = new HashMap<>();
service1Component1HostMap.put("host1", createMockServiceComponentHost(State.INSTALLED));
- Map<String, ServiceComponent> service1ComponentMap = new HashMap<String, ServiceComponent>();
+ Map<String, ServiceComponent> service1ComponentMap = new HashMap<>();
service1ComponentMap.put("COMPONENT11", createMockComponent("COMPONENT11", true, service1Component1HostMap));
Service service1 = createMockService("SERVICE1", service1ComponentMap);
- Map<String, Service> servicesMap = new HashMap<String, Service>();
+ Map<String, Service> servicesMap = new HashMap<>();
servicesMap.put("SERVICE1", service1);
Cluster cluster = createMockCluster("c1", Arrays.asList(host1), SecurityType.KERBEROS, configKrb5Conf, configKerberosEnv);
expect(cluster.getServices()).andReturn(servicesMap).times(1);
- Map<String, String> kerberosDescriptorProperties = new HashMap<String, String>();
+ Map<String, String> kerberosDescriptorProperties = new HashMap<>();
kerberosDescriptorProperties.put("additional_realms", "");
kerberosDescriptorProperties.put("keytab_dir", "/etc/security/keytabs");
kerberosDescriptorProperties.put("realm", "${kerberos-env/realm}");
// Notice that this map is empty, hence it has 0 Components in the kerberosDescriptor.
- HashMap<String, KerberosComponentDescriptor> service1ComponentDescriptorMap = new HashMap<String, KerberosComponentDescriptor>();
+ HashMap<String, KerberosComponentDescriptor> service1ComponentDescriptorMap = new HashMap<>();
- List<KerberosIdentityDescriptor> service1Identities = new ArrayList<KerberosIdentityDescriptor>();
+ List<KerberosIdentityDescriptor> service1Identities = new ArrayList<>();
KerberosServiceDescriptor service1KerberosDescriptor = createMockServiceDescriptor("SERVICE1", service1ComponentDescriptorMap, service1Identities);
KerberosDescriptor kerberosDescriptor = createMock(KerberosDescriptor.class);
@@ -2764,7 +2767,7 @@ public class KerberosHelperTest extends EasyMockSupport {
setupKerberosDescriptor(kerberosDescriptor, 1);
- Map<String, Map<String, String>> existingConfigurations = new HashMap<String, Map<String, String>>();
+ Map<String, Map<String, String>> existingConfigurations = new HashMap<>();
existingConfigurations.put("kerberos-env", propertiesKerberosEnv);
Set<String> services = new HashSet<String>() {
@@ -3086,7 +3089,7 @@ public class KerberosHelperTest extends EasyMockSupport {
// Needed by infrastructure
injector.getInstance(AmbariMetaInfo.class).init();
- Map<String, Collection<String>> serviceComponentFilter = new HashMap<String, Collection<String>>();
+ Map<String, Collection<String>> serviceComponentFilter = new HashMap<>();
Collection<String> identityFilter = Arrays.asList("identity1a", "identity3");
serviceComponentFilter.put("SERVICE3", Collections.singleton("COMPONENT3"));
@@ -3273,7 +3276,7 @@ public class KerberosHelperTest extends EasyMockSupport {
// Needed by infrastructure
injector.getInstance(AmbariMetaInfo.class).init();
- Map<String, Collection<String>> serviceComponentFilter = new HashMap<String, Collection<String>>();
+ Map<String, Collection<String>> serviceComponentFilter = new HashMap<>();
Collection<String> identityFilter = Arrays.asList("identity1a", "identity3");
serviceComponentFilter.put("SERVICE3", Collections.singleton("COMPONENT3"));
@@ -3292,7 +3295,7 @@ public class KerberosHelperTest extends EasyMockSupport {
KerberosHelper kerberosHelper = injector.getInstance(KerberosHelper.class);
boolean managingIdentities = !Boolean.FALSE.equals(manageIdentities);
- final Map<String, String> kerberosEnvProperties = new HashMap<String, String>();
+ final Map<String, String> kerberosEnvProperties = new HashMap<>();
kerberosEnvProperties.put("kdc_type", "mit-kdc");
kerberosEnvProperties.put("realm", "FOOBAR.COM");
kerberosEnvProperties.put("manage_identities", "FOOBAR.COM");
@@ -3304,12 +3307,12 @@ public class KerberosHelperTest extends EasyMockSupport {
final Config kerberosEnvConfig = createMock(Config.class);
expect(kerberosEnvConfig.getProperties()).andReturn(kerberosEnvProperties).anyTimes();
- final Map<String, String> krb5ConfProperties = new HashMap<String, String>();
+ final Map<String, String> krb5ConfProperties = new HashMap<>();
final Config krb5ConfConfig = createMock(Config.class);
expect(krb5ConfConfig.getProperties()).andReturn(krb5ConfProperties).anyTimes();
- final Map<String, Object> attributeMap = new HashMap<String, Object>();
+ final Map<String, Object> attributeMap = new HashMap<>();
final Cluster cluster = createNiceMock(Cluster.class);
expect(cluster.getDesiredConfigByType("krb5-conf")).andReturn(krb5ConfConfig).anyTimes();
@@ -3446,7 +3449,7 @@ public class KerberosHelperTest extends EasyMockSupport {
// Needed by infrastructure
injector.getInstance(AmbariMetaInfo.class).init();
- Map<String, String> commandParamsStage = new HashMap<String, String>();
+ Map<String, String> commandParamsStage = new HashMap<>();
CredentialStoreService credentialStoreService = injector.getInstance(CredentialStoreService.class);
credentialStoreService.setCredential(cluster.getClusterName(), KerberosHelper.KDC_ADMINISTRATOR_CREDENTIAL_ALIAS,
PrincipalKeyCredential, CredentialStoreType.TEMPORARY);
@@ -3602,7 +3605,7 @@ public class KerberosHelperTest extends EasyMockSupport {
// Needed by infrastructure
injector.getInstance(AmbariMetaInfo.class).init();
- Map<String, String> commandParamsStage = new HashMap<String, String>();
+ Map<String, String> commandParamsStage = new HashMap<>();
commandParamsStage.put("principal_name", "${cluster-env/smokeuser}@${realm}");
commandParamsStage.put("keytab_file", "${keytab_dir}/kerberos.service_check.keytab");
@@ -3907,7 +3910,7 @@ public class KerberosHelperTest extends EasyMockSupport {
createMockPrincipalDescriptor(ambariServerPrincipalName, ambariServerPrincipalType, "ambari", null),
createMockKeytabDescriptor(ambariServerKeytabFilePath, null));
- ArrayList<KerberosIdentityDescriptor> ambarServerComponent1Identities = new ArrayList<KerberosIdentityDescriptor>();
+ ArrayList<KerberosIdentityDescriptor> ambarServerComponent1Identities = new ArrayList<>();
ambarServerComponent1Identities.add(ambariKerberosIdentity);
ambarServerComponent1Identities.add(createMockIdentityDescriptor(
@@ -3915,7 +3918,7 @@ public class KerberosHelperTest extends EasyMockSupport {
createMockPrincipalDescriptor("HTTP/_HOST@${realm}", KerberosPrincipalType.SERVICE, null, null),
createMockKeytabDescriptor("spnego.service.keytab", null)));
- HashMap<String, KerberosComponentDescriptor> ambariServerComponentDescriptorMap = new HashMap<String, KerberosComponentDescriptor>();
+ HashMap<String, KerberosComponentDescriptor> ambariServerComponentDescriptorMap = new HashMap<>();
KerberosComponentDescriptor componentDescrptor = createMockComponentDescriptor("AMBARI_SERVER", ambarServerComponent1Identities, null);
ambariServerComponentDescriptorMap.put("AMBARI_SERVER", componentDescrptor);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ActiveWidgetLayoutResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ActiveWidgetLayoutResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ActiveWidgetLayoutResourceProviderTest.java
index 5cce3fc..e370a8a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ActiveWidgetLayoutResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ActiveWidgetLayoutResourceProviderTest.java
@@ -76,6 +76,7 @@ import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.ServiceComponentFactory;
import org.apache.ambari.server.state.ServiceComponentHostFactory;
import org.apache.ambari.server.state.ServiceFactory;
+import org.apache.ambari.server.state.UpgradeContextFactory;
import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
import org.apache.ambari.server.state.stack.OsFamily;
@@ -233,9 +234,9 @@ public class ActiveWidgetLayoutResourceProviderTest extends EasyMockSupport {
ResourceProvider provider = getResourceProvider(injector, managementController);
// add the property map to a set for the request. add more maps for multiple creates
- Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+ Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
- Map<String, Object> properties = new LinkedHashMap<String, Object>();
+ Map<String, Object> properties = new LinkedHashMap<>();
// add properties to the request map
properties.put(ActiveWidgetLayoutResourceProvider.WIDGETLAYOUT_USERNAME_PROPERTY_ID, requestedUsername);
@@ -276,18 +277,18 @@ public class ActiveWidgetLayoutResourceProviderTest extends EasyMockSupport {
AmbariManagementController managementController = injector.getInstance(AmbariManagementController.class);
- Set<Map<String, String>> widgetLayouts = new HashSet<Map<String, String>>();
+ Set<Map<String, String>> widgetLayouts = new HashSet<>();
HashMap<String, String> layout;
- layout = new HashMap<String, String>();
+ layout = new HashMap<>();
layout.put("id", "1");
widgetLayouts.add(layout);
- layout = new HashMap<String, String>();
+ layout = new HashMap<>();
layout.put("id", "2");
widgetLayouts.add(layout);
- HashMap<String, Object> requestProps = new HashMap<String, Object>();
+ HashMap<String, Object> requestProps = new HashMap<>();
requestProps.put(ActiveWidgetLayoutResourceProvider.WIDGETLAYOUT, widgetLayouts);
requestProps.put(ActiveWidgetLayoutResourceProvider.WIDGETLAYOUT_USERNAME_PROPERTY_ID, requestedUsername);
@@ -377,6 +378,9 @@ public class ActiveWidgetLayoutResourceProviderTest extends EasyMockSupport {
return Guice.createInjector(new AbstractModule() {
@Override
protected void configure() {
+ install(new FactoryModuleBuilder().build(UpgradeContextFactory.class));
+ install(new FactoryModuleBuilder().build(RoleGraphFactory.class));
+
bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
bind(ActionDBAccessor.class).toInstance(createNiceMock(ActionDBAccessor.class));
@@ -387,7 +391,6 @@ public class ActiveWidgetLayoutResourceProviderTest extends EasyMockSupport {
bind(org.apache.ambari.server.actionmanager.RequestFactory.class).toInstance(createNiceMock(org.apache.ambari.server.actionmanager.RequestFactory.class));
bind(RequestExecutionFactory.class).toInstance(createNiceMock(RequestExecutionFactory.class));
bind(StageFactory.class).toInstance(createNiceMock(StageFactory.class));
- install(new FactoryModuleBuilder().build(RoleGraphFactory.class));
bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
bind(AbstractRootServiceResponseFactory.class).toInstance(createNiceMock(AbstractRootServiceResponseFactory.class));
bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UserAuthorizationResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UserAuthorizationResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UserAuthorizationResourceProviderTest.java
index fd96c8e..4c52e11 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UserAuthorizationResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UserAuthorizationResourceProviderTest.java
@@ -73,6 +73,7 @@ import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.ServiceComponentFactory;
import org.apache.ambari.server.state.ServiceComponentHostFactory;
import org.apache.ambari.server.state.ServiceFactory;
+import org.apache.ambari.server.state.UpgradeContextFactory;
import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
import org.apache.ambari.server.state.stack.OsFamily;
@@ -224,7 +225,7 @@ public class UserAuthorizationResourceProviderTest extends EasyMockSupport {
.andReturn(null)
.anyTimes();
- Set<Resource> userPrivilegeResources = new HashSet<Resource>();
+ Set<Resource> userPrivilegeResources = new HashSet<>();
userPrivilegeResources.add(clusterResource);
userPrivilegeResources.add(viewResource);
userPrivilegeResources.add(adminResource);
@@ -334,7 +335,7 @@ public class UserAuthorizationResourceProviderTest extends EasyMockSupport {
Assert.assertEquals(3, resources.size());
- LinkedList<String> expectedIds = new LinkedList<String>();
+ LinkedList<String> expectedIds = new LinkedList<>();
expectedIds.add("CLUSTER.DO_SOMETHING");
expectedIds.add("VIEW.DO_SOMETHING");
expectedIds.add("ADMIN.DO_SOMETHING");
@@ -388,6 +389,9 @@ public class UserAuthorizationResourceProviderTest extends EasyMockSupport {
return Guice.createInjector(new AbstractModule() {
@Override
protected void configure() {
+ install(new FactoryModuleBuilder().build(UpgradeContextFactory.class));
+ install(new FactoryModuleBuilder().build(RoleGraphFactory.class));
+
bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
bind(ActionDBAccessor.class).toInstance(createNiceMock(ActionDBAccessor.class));
@@ -398,7 +402,6 @@ public class UserAuthorizationResourceProviderTest extends EasyMockSupport {
bind(org.apache.ambari.server.actionmanager.RequestFactory.class).toInstance(createNiceMock(org.apache.ambari.server.actionmanager.RequestFactory.class));
bind(RequestExecutionFactory.class).toInstance(createNiceMock(RequestExecutionFactory.class));
bind(StageFactory.class).toInstance(createNiceMock(StageFactory.class));
- install(new FactoryModuleBuilder().build(RoleGraphFactory.class));
bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
bind(AbstractRootServiceResponseFactory.class).toInstance(createNiceMock(AbstractRootServiceResponseFactory.class));
bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UserResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UserResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UserResourceProviderTest.java
index cc0f2b6..d298b7f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UserResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UserResourceProviderTest.java
@@ -70,6 +70,7 @@ import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.ServiceComponentFactory;
import org.apache.ambari.server.state.ServiceComponentHostFactory;
import org.apache.ambari.server.state.ServiceFactory;
+import org.apache.ambari.server.state.UpgradeContextFactory;
import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
import org.apache.ambari.server.state.stack.OsFamily;
@@ -226,6 +227,9 @@ public class UserResourceProviderTest extends EasyMockSupport {
return Guice.createInjector(new AbstractModule() {
@Override
protected void configure() {
+ install(new FactoryModuleBuilder().build(UpgradeContextFactory.class));
+ install(new FactoryModuleBuilder().build(RoleGraphFactory.class));
+
bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
bind(ActionDBAccessor.class).toInstance(createNiceMock(ActionDBAccessor.class));
@@ -236,7 +240,6 @@ public class UserResourceProviderTest extends EasyMockSupport {
bind(RequestFactory.class).toInstance(createNiceMock(RequestFactory.class));
bind(RequestExecutionFactory.class).toInstance(createNiceMock(RequestExecutionFactory.class));
bind(StageFactory.class).toInstance(createNiceMock(StageFactory.class));
- install(new FactoryModuleBuilder().build(RoleGraphFactory.class));
bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
bind(AbstractRootServiceResponseFactory.class).toInstance(createNiceMock(AbstractRootServiceResponseFactory.class));
bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
@@ -280,9 +283,9 @@ public class UserResourceProviderTest extends EasyMockSupport {
ResourceProvider provider = getResourceProvider(managementController);
// add the property map to a set for the request. add more maps for multiple creates
- Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+ Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
- Map<String, Object> properties = new LinkedHashMap<String, Object>();
+ Map<String, Object> properties = new LinkedHashMap<>();
// add properties to the request map
properties.put(UserResourceProvider.USER_USERNAME_PROPERTY_ID, "User100");
@@ -327,7 +330,7 @@ public class UserResourceProviderTest extends EasyMockSupport {
ResourceProvider provider = getResourceProvider(managementController);
- Set<String> propertyIds = new HashSet<String>();
+ Set<String> propertyIds = new HashSet<>();
propertyIds.add(UserResourceProvider.USER_USERNAME_PROPERTY_ID);
propertyIds.add(UserResourceProvider.USER_PASSWORD_PROPERTY_ID);
@@ -369,7 +372,7 @@ public class UserResourceProviderTest extends EasyMockSupport {
ResourceProvider provider = getResourceProvider(managementController);
- Set<String> propertyIds = new HashSet<String>();
+ Set<String> propertyIds = new HashSet<>();
propertyIds.add(UserResourceProvider.USER_USERNAME_PROPERTY_ID);
propertyIds.add(UserResourceProvider.USER_PASSWORD_PROPERTY_ID);
@@ -409,7 +412,7 @@ public class UserResourceProviderTest extends EasyMockSupport {
ResourceProvider provider = getResourceProvider(managementController);
// add the property map to a set for the request.
- Map<String, Object> properties = new LinkedHashMap<String, Object>();
+ Map<String, Object> properties = new LinkedHashMap<>();
properties.put(UserResourceProvider.USER_ADMIN_PROPERTY_ID, "true");
// create the request
@@ -443,7 +446,7 @@ public class UserResourceProviderTest extends EasyMockSupport {
ResourceProvider provider = getResourceProvider(managementController);
// add the property map to a set for the request.
- Map<String, Object> properties = new LinkedHashMap<String, Object>();
+ Map<String, Object> properties = new LinkedHashMap<>();
properties.put(UserResourceProvider.USER_ACTIVE_PROPERTY_ID, "true");
Request request = PropertyHelper.getUpdateRequest(properties, null);
@@ -473,7 +476,7 @@ public class UserResourceProviderTest extends EasyMockSupport {
ResourceProvider provider = getResourceProvider(managementController);
// add the property map to a set for the request.
- Map<String, Object> properties = new LinkedHashMap<String, Object>();
+ Map<String, Object> properties = new LinkedHashMap<>();
properties.put(UserResourceProvider.USER_OLD_PASSWORD_PROPERTY_ID, "old_password");
properties.put(UserResourceProvider.USER_PASSWORD_PROPERTY_ID, "new_password");
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index 526e462..1e65fc2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@ -125,7 +125,7 @@ public class ConfigHelperTest {
put("fs.trash.interval", "30");
}});
cr.setPropertiesAttributes(new HashMap<String, Map<String, String>>() {{
- Map<String, String> attrs = new HashMap<String, String>();
+ Map<String, String> attrs = new HashMap<>();
attrs.put("ipc.client.connect.max.retries", "1");
attrs.put("fs.trash.interval", "2");
put("attribute1", attrs);
@@ -165,7 +165,7 @@ public class ConfigHelperTest {
put("namenode_heapsize", "1024");
}});
cr.setPropertiesAttributes(new HashMap<String, Map<String, String>>() {{
- Map<String, String> attrs = new HashMap<String, String>();
+ Map<String, String> attrs = new HashMap<>();
attrs.put("dfs_namenode_name_dir", "3");
attrs.put("namenode_heapsize", "4");
put("attribute2", attrs);
@@ -233,8 +233,8 @@ public class ConfigHelperTest {
Long addConfigGroup(String name, String tag, List<String> hosts,
List<Config> configs) throws AmbariException {
- Map<Long, Host> hostMap = new HashMap<Long, Host>();
- Map<String, Config> configMap = new HashMap<String, Config>();
+ Map<Long, Host> hostMap = new HashMap<>();
+ Map<String, Config> configMap = new HashMap<>();
Long hostId = 1L;
for (String hostname : hosts) {
@@ -277,7 +277,7 @@ public class ConfigHelperTest {
@Test
public void testProcessHiddenAttribute() throws Exception {
StackInfo stackInfo = metaInfo.getStack("HDP", "2.0.5");
- Map<String, Map<String, Map<String, String>>> configAttributes = new HashMap<String, Map<String, Map<String, String>>>();
+ Map<String, Map<String, Map<String, String>>> configAttributes = new HashMap<>();
configAttributes.put("hive-site", stackInfo.getDefaultConfigAttributesForConfigType("hive-site"));
Map<String, Map<String, String>> originalConfig_hiveClient = createHiveConfig();
@@ -339,7 +339,7 @@ public class ConfigHelperTest {
add(clusterRequest6);
}}, null);
- Map<String, String> properties = new HashMap<String, String>();
+ Map<String, String> properties = new HashMap<>();
properties.put("a", "b");
properties.put("c", "d");
@@ -378,7 +378,7 @@ public class ConfigHelperTest {
put("fs.trash.interval", "30");
}});
cr.setPropertiesAttributes(new HashMap<String, Map<String, String>>() {{
- Map<String, String> attrs = new HashMap<String, String>();
+ Map<String, String> attrs = new HashMap<>();
attrs.put("ipc.client.connect.max.retries", "1");
attrs.put("fs.trash.interval", "2");
put("attribute1", attrs);
@@ -401,7 +401,7 @@ public class ConfigHelperTest {
put("namenode_heapsize", "1024");
}});
cr.setPropertiesAttributes(new HashMap<String, Map<String, String>>() {{
- Map<String, String> attrs = new HashMap<String, String>();
+ Map<String, String> attrs = new HashMap<>();
attrs.put("dfs_namenode_name_dir", "3");
attrs.put("namenode_heapsize", "4");
put("attribute2", attrs);
@@ -416,12 +416,12 @@ public class ConfigHelperTest {
add(clusterRequest3);
}}, null);
- Map<String, String> properties = new HashMap<String, String>();
+ Map<String, String> properties = new HashMap<>();
properties.put("a", "b");
properties.put("c", "d");
final Config config1 = configFactory.createNew(cluster, "core-site2", "version122", properties, null);
- Map<String, String> properties2 = new HashMap<String, String>();
+ Map<String, String> properties2 = new HashMap<>();
properties2.put("namenode_heapsize", "1111");
final Config config2 = configFactory.createNew(cluster, "global2", "version122", properties2, null);
@@ -464,7 +464,7 @@ public class ConfigHelperTest {
put("fs.trash.interval", "30");
}});
crr.setPropertiesAttributes(new HashMap<String, Map<String, String>>() {{
- Map<String, String> attrs = new HashMap<String, String>();
+ Map<String, String> attrs = new HashMap<>();
attrs.put("ipc.client.connect.max.retries", "1");
attrs.put("fs.trash.interval", "2");
put("attribute1", attrs);
@@ -487,7 +487,7 @@ public class ConfigHelperTest {
put("namenode_heapsize", "1024");
}});
crr.setPropertiesAttributes(new HashMap<String, Map<String, String>>() {{
- Map<String, String> attrs = new HashMap<String, String>();
+ Map<String, String> attrs = new HashMap<>();
attrs.put("dfs_namenode_name_dir", "3");
attrs.put("namenode_heapsize", "4");
put("attribute2", attrs);
@@ -503,19 +503,19 @@ public class ConfigHelperTest {
}}, null);
- Map<String, String> attributes = new HashMap<String, String>();
+ Map<String, String> attributes = new HashMap<>();
attributes.put("fs.trash.interval", "11");
attributes.put("b", "y");
- Map<String, Map<String, String>> config1Attributes = new HashMap<String, Map<String, String>>();
+ Map<String, Map<String, String>> config1Attributes = new HashMap<>();
config1Attributes.put("attribute1", attributes);
final Config config1 = configFactory.createNew(cluster, "core-site3", "version122",
new HashMap<String, String>(), config1Attributes);
- attributes = new HashMap<String, String>();
+ attributes = new HashMap<>();
attributes.put("namenode_heapsize", "z");
attributes.put("c", "q");
- Map<String, Map<String, String>> config2Attributes = new HashMap<String, Map<String, String>>();
+ Map<String, Map<String, String>> config2Attributes = new HashMap<>();
config2Attributes.put("attribute2", attributes);
final Config config2 = configFactory.createNew(cluster, "global3", "version122",
@@ -567,19 +567,19 @@ public class ConfigHelperTest {
@Test
public void testCloneAttributesMap() throws Exception {
// init
- Map<String, Map<String, String>> targetAttributesMap = new HashMap<String, Map<String, String>>();
- Map<String, String> attributesValues = new HashMap<String, String>();
+ Map<String, Map<String, String>> targetAttributesMap = new HashMap<>();
+ Map<String, String> attributesValues = new HashMap<>();
attributesValues.put("a", "1");
attributesValues.put("b", "2");
attributesValues.put("f", "3");
attributesValues.put("q", "4");
targetAttributesMap.put("attr", attributesValues);
- Map<String, Map<String, String>> sourceAttributesMap = new HashMap<String, Map<String, String>>();
- attributesValues = new HashMap<String, String>();
+ Map<String, Map<String, String>> sourceAttributesMap = new HashMap<>();
+ attributesValues = new HashMap<>();
attributesValues.put("a", "5");
attributesValues.put("f", "6");
sourceAttributesMap.put("attr", attributesValues);
- attributesValues = new HashMap<String, String>();
+ attributesValues = new HashMap<>();
attributesValues.put("f", "7");
attributesValues.put("q", "8");
sourceAttributesMap.put("attr1", attributesValues);
@@ -606,8 +606,8 @@ public class ConfigHelperTest {
@Test
public void testCloneAttributesMapSourceIsNull() throws Exception {
// init
- Map<String, Map<String, String>> targetAttributesMap = new HashMap<String, Map<String, String>>();
- Map<String, String> attributesValues = new HashMap<String, String>();
+ Map<String, Map<String, String>> targetAttributesMap = new HashMap<>();
+ Map<String, String> attributesValues = new HashMap<>();
attributesValues.put("a", "1");
attributesValues.put("b", "2");
attributesValues.put("f", "3");
@@ -635,12 +635,12 @@ public class ConfigHelperTest {
public void testCloneAttributesMapTargetIsNull() throws Exception {
// init
Map<String, Map<String, String>> targetAttributesMap = null;
- Map<String, Map<String, String>> sourceAttributesMap = new HashMap<String, Map<String, String>>();
- Map<String, String> attributesValues = new HashMap<String, String>();
+ Map<String, Map<String, String>> sourceAttributesMap = new HashMap<>();
+ Map<String, String> attributesValues = new HashMap<>();
attributesValues.put("a", "5");
attributesValues.put("f", "6");
sourceAttributesMap.put("attr", attributesValues);
- attributesValues = new HashMap<String, String>();
+ attributesValues = new HashMap<>();
attributesValues.put("f", "7");
attributesValues.put("q", "8");
sourceAttributesMap.put("attr1", attributesValues);
@@ -666,17 +666,17 @@ public class ConfigHelperTest {
@Test
public void testMergeAttributes() throws Exception {
- Map<String, Map<String, String>> persistedAttributes = new HashMap<String, Map<String, String>>();
- Map<String, String> persistedFinalAttrs = new HashMap<String, String>();
+ Map<String, Map<String, String>> persistedAttributes = new HashMap<>();
+ Map<String, String> persistedFinalAttrs = new HashMap<>();
persistedFinalAttrs.put("a", "true");
persistedFinalAttrs.put("c", "true");
persistedFinalAttrs.put("d", "true");
persistedAttributes.put("final", persistedFinalAttrs);
- Map<String, Map<String, String>> confGroupAttributes = new HashMap<String, Map<String, String>>();
- Map<String, String> confGroupFinalAttrs = new HashMap<String, String>();
+ Map<String, Map<String, String>> confGroupAttributes = new HashMap<>();
+ Map<String, String> confGroupFinalAttrs = new HashMap<>();
confGroupFinalAttrs.put("b", "true");
confGroupAttributes.put("final", confGroupFinalAttrs);
- Map<String, String> confGroupProperties = new HashMap<String, String>();
+ Map<String, String> confGroupProperties = new HashMap<>();
confGroupProperties.put("a", "any");
confGroupProperties.put("b", "any");
confGroupProperties.put("c", "any");
@@ -698,14 +698,14 @@ public class ConfigHelperTest {
@Test
public void testMergeAttributesWithNoAttributeOverrides() throws Exception {
- Map<String, Map<String, String>> persistedAttributes = new HashMap<String, Map<String, String>>();
- Map<String, String> persistedFinalAttrs = new HashMap<String, String>();
+ Map<String, Map<String, String>> persistedAttributes = new HashMap<>();
+ Map<String, String> persistedFinalAttrs = new HashMap<>();
persistedFinalAttrs.put("a", "true");
persistedFinalAttrs.put("c", "true");
persistedFinalAttrs.put("d", "true");
persistedAttributes.put("final", persistedFinalAttrs);
- Map<String, Map<String, String>> confGroupAttributes = new HashMap<String, Map<String, String>>();
- Map<String, String> confGroupProperties = new HashMap<String, String>();
+ Map<String, Map<String, String>> confGroupAttributes = new HashMap<>();
+ Map<String, String> confGroupProperties = new HashMap<>();
confGroupProperties.put("a", "any");
confGroupProperties.put("b", "any");
confGroupProperties.put("c", "any");
@@ -726,13 +726,13 @@ public class ConfigHelperTest {
@Test
public void testMergeAttributesWithNullAttributes() throws Exception {
- Map<String, Map<String, String>> persistedAttributes = new HashMap<String, Map<String, String>>();
- Map<String, String> persistedFinalAttrs = new HashMap<String, String>();
+ Map<String, Map<String, String>> persistedAttributes = new HashMap<>();
+ Map<String, String> persistedFinalAttrs = new HashMap<>();
persistedFinalAttrs.put("a", "true");
persistedFinalAttrs.put("c", "true");
persistedFinalAttrs.put("d", "true");
persistedAttributes.put("final", persistedFinalAttrs);
- Map<String, String> confGroupProperties = new HashMap<String, String>();
+ Map<String, String> confGroupProperties = new HashMap<>();
confGroupProperties.put("a", "any");
confGroupProperties.put("b", "any");
confGroupProperties.put("c", "any");
@@ -755,14 +755,14 @@ public class ConfigHelperTest {
@Test
public void testMergeAttributesWithNullProperties() throws Exception {
- Map<String, Map<String, String>> persistedAttributes = new HashMap<String, Map<String, String>>();
- Map<String, String> persistedFinalAttrs = new HashMap<String, String>();
+ Map<String, Map<String, String>> persistedAttributes = new HashMap<>();
+ Map<String, String> persistedFinalAttrs = new HashMap<>();
persistedFinalAttrs.put("a", "true");
persistedFinalAttrs.put("c", "true");
persistedFinalAttrs.put("d", "true");
persistedAttributes.put("final", persistedFinalAttrs);
- Map<String, Map<String, String>> confGroupAttributes = new HashMap<String, Map<String, String>>();
- Map<String, String> confGroupFinalAttrs = new HashMap<String, String>();
+ Map<String, Map<String, String>> confGroupAttributes = new HashMap<>();
+ Map<String, String> confGroupFinalAttrs = new HashMap<>();
confGroupFinalAttrs.put("b", "true");
confGroupAttributes.put("final", confGroupFinalAttrs);
@@ -801,7 +801,7 @@ public class ConfigHelperTest {
Assert.assertTrue(propertiesAttributes.get("attribute1").containsKey("ipc.client.connect.max.retries"));
- Map<String, String> updates = new HashMap<String, String>();
+ Map<String, String> updates = new HashMap<>();
updates.put("new-property", "new-value");
updates.put("fs.trash.interval", "updated-value");
Collection<String> removals = Collections.singletonList("ipc.client.connect.max.retries");
@@ -839,7 +839,7 @@ public class ConfigHelperTest {
Assert.assertEquals("simple", properties.get("oozie.authentication.type"));
Assert.assertEquals("false", properties.get("oozie.service.HadoopAccessorService.kerberos.enabled"));
- Map<String, String> updates = new HashMap<String, String>();
+ Map<String, String> updates = new HashMap<>();
updates.put("oozie.authentication.type", "kerberos");
updates.put("oozie.service.HadoopAccessorService.kerberos.enabled", "true");
@@ -868,7 +868,7 @@ public class ConfigHelperTest {
Assert.assertEquals("embedded", properties.get("timeline.service.operating.mode"));
Assert.assertEquals("false", properties.get("timeline.service.fifo.enabled"));
- List<String> removals = new ArrayList<String>();
+ List<String> removals = new ArrayList<>();
removals.add("timeline.service.operating.mode");
configHelper.updateConfigType(cluster, managementController, "ams-site", null, removals, "admin", "Test note");
@@ -887,7 +887,7 @@ public class ConfigHelperTest {
@Test
public void testCalculateIsStaleConfigs() throws Exception {
- Map<String, HostConfig> schReturn = new HashMap<String, HostConfig>();
+ Map<String, HostConfig> schReturn = new HashMap<>();
HostConfig hc = new HostConfig();
// Put a different version to check for change
hc.setDefaultVersionTag("version2");
@@ -913,9 +913,9 @@ public class ConfigHelperTest {
Assert.assertFalse(configHelper.isStaleConfigs(sch, null));
// Cluster level same configs but group specific configs for host have been updated
- List<String> hosts = new ArrayList<String>();
+ List<String> hosts = new ArrayList<>();
hosts.add("h1");
- List<Config> configs = new ArrayList<Config>();
+ List<Config> configs = new ArrayList<>();
Config configImpl = configFactory.createNew(cluster, "flume-conf", "FLUME1",
new HashMap<String,String>(), null);
@@ -966,6 +966,8 @@ public class ConfigHelperTest {
final AmbariMetaInfo mockMetaInfo = createNiceMock(AmbariMetaInfo.class);
final ClusterController clusterController = createStrictMock(ClusterController.class);
+ bind(UpgradeContextFactory.class).toInstance(createNiceMock(UpgradeContextFactory.class));
+
bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
bind(ClusterFactory.class).toInstance(createNiceMock(ClusterFactory.class));
[3/3] ambari git commit: AMBARI-19617 - Restarting Some Components
During a Suspended Upgrade Fails Due To Missing Upgrade Parameters
(jonathanhurley)
Posted by jo...@apache.org.
AMBARI-19617 - Restarting Some Components During a Suspended Upgrade Fails Due To Missing Upgrade Parameters (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d540f943
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d540f943
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d540f943
Branch: refs/heads/trunk
Commit: d540f943da59d196b377a9e15fcd2e6dcb50483b
Parents: de8bf60
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jan 18 20:33:43 2017 -0500
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Thu Jan 19 16:11:09 2017 -0500
----------------------------------------------------------------------
.../controller/AmbariActionExecutionHelper.java | 13 +-
.../AmbariCustomCommandExecutionHelper.java | 60 +++---
.../AmbariManagementControllerImpl.java | 183 +++++++++---------
.../internal/UpgradeResourceProvider.java | 154 +++++----------
.../org/apache/ambari/server/state/Cluster.java | 14 ++
.../ambari/server/state/UpgradeContext.java | 191 ++++++++++++++++---
.../server/state/UpgradeContextFactory.java | 25 ++-
.../server/state/cluster/ClusterImpl.java | 106 ++++++----
.../ambari/server/agent/AgentResourceTest.java | 4 +
.../server/controller/KerberosHelperTest.java | 105 +++++-----
.../ActiveWidgetLayoutResourceProviderTest.java | 17 +-
.../UserAuthorizationResourceProviderTest.java | 9 +-
.../internal/UserResourceProviderTest.java | 19 +-
.../ambari/server/state/ConfigHelperTest.java | 98 +++++-----
.../ambari/server/state/UpgradeHelperTest.java | 167 +++++++---------
.../cluster/ClusterEffectiveVersionTest.java | 2 +
.../stack/upgrade/StageWrapperBuilderTest.java | 46 ++++-
17 files changed, 694 insertions(+), 519 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index ec0f7d0..d556b60 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -270,7 +270,7 @@ public class AmbariActionExecutionHelper {
}
// List of host to select from
- Set<String> candidateHosts = new HashSet<String>();
+ Set<String> candidateHosts = new HashSet<>();
final String serviceName = actionContext.getExpectedServiceName();
final String componentName = actionContext.getExpectedComponentName();
@@ -394,7 +394,7 @@ public class AmbariActionExecutionHelper {
clusterName, serviceName, actionContext.isRetryAllowed(),
actionContext.isFailureAutoSkipped());
- Map<String, String> commandParams = new TreeMap<String, String>();
+ Map<String, String> commandParams = new TreeMap<>();
int taskTimeout = Integer.parseInt(configs.getDefaultAgentTaskTimeout(false));
@@ -435,15 +435,13 @@ public class AmbariActionExecutionHelper {
// when building complex orchestration ahead of time (such as when
// performing ugprades), fetching configuration tags can take a very long
// time - if it's not needed, then don't do it
- Map<String, Map<String, String>> configTags = new TreeMap<String, Map<String, String>>();
+ Map<String, Map<String, String>> configTags = new TreeMap<>();
if (!execCmd.getForceRefreshConfigTagsBeforeExecution()) {
configTags = managementController.findConfigurationTagsWithOverrides(cluster, hostName);
}
execCmd.setConfigurationTags(configTags);
- execCmd.setCommandParams(commandParams);
-
execCmd.setServiceName(serviceName == null || serviceName.isEmpty() ?
resourceFilter.getServiceName() : serviceName);
@@ -463,7 +461,7 @@ public class AmbariActionExecutionHelper {
Map<String, String> roleParams = execCmd.getRoleParams();
if (roleParams == null) {
- roleParams = new TreeMap<String, String>();
+ roleParams = new TreeMap<>();
}
roleParams.putAll(actionParameters);
@@ -477,9 +475,10 @@ public class AmbariActionExecutionHelper {
// if there is a stack upgrade which is currently suspended then pass that
// information down with the command as some components may need to know
if (null != cluster && cluster.isUpgradeSuspended()) {
- roleParams.put(KeyNames.UPGRADE_SUSPENDED, Boolean.TRUE.toString().toLowerCase());
+ cluster.addSuspendedUpgradeParameters(commandParams, roleParams);
}
+ execCmd.setCommandParams(commandParams);
execCmd.setRoleParams(roleParams);
if (null != cluster) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index bdad015..923a796 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -125,7 +125,7 @@ public class AmbariCustomCommandExecutionHelper {
AmbariCustomCommandExecutionHelper.class);
// TODO: Remove the hard-coded mapping when stack definition indicates which slave types can be decommissioned
- public static final Map<String, String> masterToSlaveMappingForDecom = new HashMap<String, String>();
+ public static final Map<String, String> masterToSlaveMappingForDecom = new HashMap<>();
static {
masterToSlaveMappingForDecom.put("NAMENODE", "DATANODE");
@@ -276,7 +276,7 @@ public class AmbariCustomCommandExecutionHelper {
final Cluster cluster = clusters.getCluster(clusterName);
// start with all hosts
- Set<String> candidateHosts = new HashSet<String>(resourceFilter.getHostNames());
+ Set<String> candidateHosts = new HashSet<>(resourceFilter.getHostNames());
// Filter hosts that are in MS
Set<String> ignoredHosts = maintenanceStateHelper.filterHostsInMaintenanceState(
@@ -325,7 +325,6 @@ public class AmbariCustomCommandExecutionHelper {
(stackId.getStackName(), stackId.getStackVersion());
ClusterVersionEntity effectiveClusterVersion = cluster.getEffectiveClusterVersion();
- boolean isUpgradeSuspended = cluster.isUpgradeSuspended();
CustomCommandDefinition customCommandDefinition = null;
ComponentInfo ci = serviceInfo.getComponentByName(componentName);
@@ -345,10 +344,10 @@ public class AmbariCustomCommandExecutionHelper {
cluster.getClusterName(), serviceName, retryAllowed, autoSkipFailure);
Map<String, Map<String, String>> configurations =
- new TreeMap<String, Map<String, String>>();
+ new TreeMap<>();
Map<String, Map<String, Map<String, String>>> configurationAttributes =
- new TreeMap<String, Map<String, Map<String, String>>>();
- Map<String, Map<String, String>> configTags = new TreeMap<String, Map<String, String>>();
+ new TreeMap<>();
+ Map<String, Map<String, String>> configTags = new TreeMap<>();
ExecutionCommand execCmd = stage.getExecutionCommandWrapper(hostName,
componentName).getExecutionCommand();
@@ -383,7 +382,7 @@ public class AmbariCustomCommandExecutionHelper {
execCmd.setAvailableServicesFromServiceInfoMap(ambariMetaInfo.getServices(stackId.getStackName(), stackId.getStackVersion()));
- Map<String, String> hostLevelParams = new TreeMap<String, String>();
+ Map<String, String> hostLevelParams = new TreeMap<>();
hostLevelParams.put(CUSTOM_COMMAND, commandName);
@@ -408,7 +407,7 @@ public class AmbariCustomCommandExecutionHelper {
execCmd.setHostLevelParams(hostLevelParams);
- Map<String, String> commandParams = new TreeMap<String, String>();
+ Map<String, String> commandParams = new TreeMap<>();
if (additionalCommandParams != null) {
for (String key : additionalCommandParams.keySet()) {
commandParams.put(key, additionalCommandParams.get(key));
@@ -449,20 +448,21 @@ public class AmbariCustomCommandExecutionHelper {
commandParams.put(KeyNames.VERSION, effectiveClusterVersion.getRepositoryVersion().getVersion());
}
- execCmd.setCommandParams(commandParams);
-
Map<String, String> roleParams = execCmd.getRoleParams();
if (roleParams == null) {
- roleParams = new TreeMap<String, String>();
+ roleParams = new TreeMap<>();
}
// if there is a stack upgrade which is currently suspended then pass that
// information down with the command as some components may need to know
+ boolean isUpgradeSuspended = cluster.isUpgradeSuspended();
if (isUpgradeSuspended) {
- roleParams.put(KeyNames.UPGRADE_SUSPENDED, Boolean.TRUE.toString().toLowerCase());
+ cluster.addSuspendedUpgradeParameters(commandParams, roleParams);
}
roleParams.put(COMPONENT_CATEGORY, componentInfo.getCategory());
+
+ execCmd.setCommandParams(commandParams);
execCmd.setRoleParams(roleParams);
// perform any server side command related logic - eg - set desired states on restart
@@ -523,7 +523,7 @@ public class AmbariCustomCommandExecutionHelper {
// Otherwise, use candidates that contain the component.
List<String> candidateHostsList = resourceFilter.getHostNames();
if (candidateHostsList != null && !candidateHostsList.isEmpty()) {
- candidateHosts = new HashSet<String>(candidateHostsList);
+ candidateHosts = new HashSet<>(candidateHostsList);
// Get the intersection.
candidateHosts.retainAll(serviceHostComponents.keySet());
@@ -570,7 +570,7 @@ public class AmbariCustomCommandExecutionHelper {
}
// Filter out hosts that are in maintenance mode - they should never be included in service checks
- Set<String> hostsInMaintenanceMode = new HashSet<String>();
+ Set<String> hostsInMaintenanceMode = new HashSet<>();
if (actionExecutionContext.isMaintenanceModeHostExcluded()) {
Iterator<String> iterator = candidateHosts.iterator();
while (iterator.hasNext()) {
@@ -675,10 +675,10 @@ public class AmbariCustomCommandExecutionHelper {
}
// [ type -> [ key, value ] ]
Map<String, Map<String, String>> configurations =
- new TreeMap<String, Map<String, String>>();
+ new TreeMap<>();
Map<String, Map<String, Map<String, String>>> configurationAttributes =
- new TreeMap<String, Map<String, Map<String, String>>>();
- Map<String, Map<String, String>> configTags = new TreeMap<String, Map<String, String>>();
+ new TreeMap<>();
+ Map<String, Map<String, String>> configTags = new TreeMap<>();
ExecutionCommand execCmd = stage.getExecutionCommandWrapper(hostname,
smokeTestRole).getExecutionCommand();
@@ -709,7 +709,7 @@ public class AmbariCustomCommandExecutionHelper {
execCmd.getLocalComponents().add(sch.getServiceComponentName());
}
- Map<String, String> commandParams = new TreeMap<String, String>();
+ Map<String, String> commandParams = new TreeMap<>();
//Propagate HCFS service type info
Map<String, ServiceInfo> serviceInfos = ambariMetaInfo.getServices(stackId.getStackName(), stackId.getStackVersion());
@@ -755,7 +755,7 @@ public class AmbariCustomCommandExecutionHelper {
}
private Set<String> getHostList(Map<String, String> cmdParameters, String key) {
- Set<String> hosts = new HashSet<String>();
+ Set<String> hosts = new HashSet<>();
if (cmdParameters.containsKey(key)) {
String allHosts = cmdParameters.get(key);
if (allHosts != null) {
@@ -792,7 +792,7 @@ public class AmbariCustomCommandExecutionHelper {
DECOM_INCLUDED_HOSTS);
- Set<String> cloneSet = new HashSet<String>(excludedHosts);
+ Set<String> cloneSet = new HashSet<>(excludedHosts);
cloneSet.retainAll(includedHosts);
if (cloneSet.size() > 0) {
throw new AmbariException("Same host cannot be specified for inclusion " +
@@ -877,7 +877,7 @@ public class AmbariCustomCommandExecutionHelper {
}
};
// Filter excluded hosts
- Set<String> filteredExcludedHosts = new HashSet<String>(excludedHosts);
+ Set<String> filteredExcludedHosts = new HashSet<>(excludedHosts);
Set<String> ignoredHosts = maintenanceStateHelper.filterHostsInMaintenanceState(
filteredExcludedHosts, hostPredicate);
if (! ignoredHosts.isEmpty()) {
@@ -889,7 +889,7 @@ public class AmbariCustomCommandExecutionHelper {
}
// Filter included hosts
- Set<String> filteredIncludedHosts = new HashSet<String>(includedHosts);
+ Set<String> filteredIncludedHosts = new HashSet<>(includedHosts);
ignoredHosts = maintenanceStateHelper.filterHostsInMaintenanceState(
filteredIncludedHosts, hostPredicate);
if (! ignoredHosts.isEmpty()) {
@@ -913,7 +913,7 @@ public class AmbariCustomCommandExecutionHelper {
String alignMtnStateStr = actionExecutionContext.getParameters().get(ALIGN_MAINTENANCE_STATE);
boolean alignMtnState = "true".equals(alignMtnStateStr);
// Set/reset decommissioned flag on all components
- List<String> listOfExcludedHosts = new ArrayList<String>();
+ List<String> listOfExcludedHosts = new ArrayList<>();
for (ServiceComponentHost sch : svcComponents.get(slaveCompType).getServiceComponentHosts().values()) {
if (filteredExcludedHosts.contains(sch.getHostName())) {
sch.setComponentAdminState(HostComponentAdminState.DECOMMISSIONED);
@@ -956,7 +956,7 @@ public class AmbariCustomCommandExecutionHelper {
for (String hostName : masterSchs.keySet()) {
RequestResourceFilter commandFilter = new RequestResourceFilter(serviceName,
masterComponent.getName(), Collections.singletonList(hostName));
- List<RequestResourceFilter> resourceFilters = new ArrayList<RequestResourceFilter>();
+ List<RequestResourceFilter> resourceFilters = new ArrayList<>();
resourceFilters.add(commandFilter);
ActionExecutionContext commandContext = new ActionExecutionContext(
@@ -969,7 +969,7 @@ public class AmbariCustomCommandExecutionHelper {
// Reset cluster host info as it has changed
stage.setClusterHostInfo(clusterHostInfoJson);
- Map<String, String> commandParams = new HashMap<String, String>();
+ Map<String, String> commandParams = new HashMap<>();
if (serviceName.equals(Service.Type.HBASE.name())) {
commandParams.put(DECOM_EXCLUDED_HOSTS, StringUtils.join(listOfExcludedHosts, ','));
if ((isDrainOnlyRequest != null) && isDrainOnlyRequest.equals("true")) {
@@ -1066,7 +1066,7 @@ public class AmbariCustomCommandExecutionHelper {
} else if (isValidCustomCommand(actionExecutionContext, resourceFilter)) {
String commandDetail = getReadableCustomCommandDetail(actionExecutionContext, resourceFilter);
- Map<String, String> extraParams = new HashMap<String, String>();
+ Map<String, String> extraParams = new HashMap<>();
String componentName = (null == resourceFilter.getComponentName()) ? null :
resourceFilter.getComponentName().toLowerCase();
@@ -1233,7 +1233,7 @@ public class AmbariCustomCommandExecutionHelper {
Cluster cluster, StackId stackId) throws AmbariException {
Map<String, String> commandParamsStage = StageUtils.getCommandParamsStage(actionExecContext);
- Map<String, String> hostParamsStage = new HashMap<String, String>();
+ Map<String, String> hostParamsStage = new HashMap<>();
Map<String, Set<String>> clusterHostInfo;
String clusterHostInfoJson = "{}";
@@ -1254,7 +1254,7 @@ public class AmbariCustomCommandExecutionHelper {
serviceName, componentName);
List<String> clientsToUpdateConfigsList = componentInfo.getClientsToUpdateConfigs();
if (clientsToUpdateConfigsList == null) {
- clientsToUpdateConfigsList = new ArrayList<String>();
+ clientsToUpdateConfigsList = new ArrayList<>();
clientsToUpdateConfigsList.add("*");
}
String clientsToUpdateConfigs = gson.toJson(clientsToUpdateConfigsList);
@@ -1288,7 +1288,7 @@ public class AmbariCustomCommandExecutionHelper {
}
Map<String, String> createDefaultHostParams(Cluster cluster, StackId stackId) throws AmbariException{
- TreeMap<String, String> hostLevelParams = new TreeMap<String, String>();
+ TreeMap<String, String> hostLevelParams = new TreeMap<>();
hostLevelParams.put(JDK_LOCATION, managementController.getJdkResourceUrl());
hostLevelParams.put(JAVA_HOME, managementController.getJavaHome());
hostLevelParams.put(JAVA_VERSION, String.valueOf(configs.getJavaVersion()));
@@ -1427,7 +1427,7 @@ public class AmbariCustomCommandExecutionHelper {
private Set<String> getUnhealthyHosts(Set<String> hosts,
ActionExecutionContext actionExecutionContext,
RequestResourceFilter resourceFilter) throws AmbariException {
- Set<String> removedHosts = new HashSet<String>();
+ Set<String> removedHosts = new HashSet<>();
for (String hostname : hosts) {
if (filterUnhealthHostItem(hostname, actionExecutionContext, resourceFilter)){
removedHosts.add(hostname);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 9afe598..a6a56ed 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -176,7 +176,6 @@ import org.apache.ambari.server.state.ServiceComponentFactory;
import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.ServiceComponentHostEvent;
import org.apache.ambari.server.state.ServiceComponentHostFactory;
-import org.apache.ambari.server.state.ServiceFactory;
import org.apache.ambari.server.state.ServiceInfo;
import org.apache.ambari.server.state.ServiceOsSpecific;
import org.apache.ambari.server.state.StackId;
@@ -253,8 +252,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
private RoleCommandOrderProvider roleCommandOrderProvider;
@Inject
- private ServiceFactory serviceFactory;
- @Inject
private ServiceComponentFactory serviceComponentFactory;
@Inject
private ServiceComponentHostFactory serviceComponentHostFactory;
@@ -301,10 +298,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Inject
private ClusterVersionDAO clusterVersionDAO;
@Inject
- private AmbariEventPublisher ambariEventPublisher;
- @Inject
- private MetricsCollectorHAManager metricsCollectorHAManager;
- @Inject
private SettingDAO settingDAO;
private MaintenanceStateHelper maintenanceStateHelper;
@@ -519,8 +512,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
// do all validation checks
Map<String, Map<String, Map<String, Set<String>>>> hostComponentNames =
- new HashMap<String, Map<String, Map<String, Set<String>>>>();
- Set<String> duplicates = new HashSet<String>();
+ new HashMap<>();
+ Set<String> duplicates = new HashSet<>();
for (ServiceComponentHostRequest request : requests) {
validateServiceComponentHostRequest(request);
@@ -907,10 +900,10 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
Map<String, Config> configs = cluster.getConfigsByType(
request.getType());
if (null == configs) {
- configs = new HashMap<String, Config>();
+ configs = new HashMap<>();
}
- Map<String, Map<String, String>> propertiesAttributes = new HashMap<String, Map<String,String>>();
+ Map<String, Map<String, String>> propertiesAttributes = new HashMap<>();
StackId currentStackId = cluster.getCurrentStackVersion();
StackInfo currentStackInfo = ambariMetaInfo.getStack(currentStackId.getStackName(), currentStackId.getStackVersion());
@@ -993,7 +986,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<MemberResponse> getMembers(Set<MemberRequest> requests)
throws AmbariException {
- final Set<MemberResponse> responses = new HashSet<MemberResponse>();
+ final Set<MemberResponse> responses = new HashSet<>();
for (MemberRequest request: requests) {
LOG.debug("Received a getMembers request, " + request.toString());
final Group group = users.getGroup(request.getGroupName());
@@ -1025,7 +1018,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
groupName = request.getGroupName();
}
- final List<String> requiredMembers = new ArrayList<String>();
+ final List<String> requiredMembers = new ArrayList<>();
for (MemberRequest request: requests) {
if (request.getUserName() != null) {
requiredMembers.add(request.getUserName());
@@ -1057,7 +1050,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
private Set<ClusterResponse> getClusters(ClusterRequest request)
throws AmbariException, AuthorizationException {
- Set<ClusterResponse> response = new HashSet<ClusterResponse>();
+ Set<ClusterResponse> response = new HashSet<>();
if (LOG.isDebugEnabled()) {
LOG.debug("Received a getClusters request"
@@ -1193,7 +1186,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
}
- Set<Service> services = new HashSet<Service>();
+ Set<Service> services = new HashSet<>();
if (request.getServiceName() != null && !request.getServiceName().isEmpty()) {
services.add(cluster.getService(request.getServiceName()));
} else {
@@ -1201,7 +1194,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
Set<ServiceComponentHostResponse> response =
- new HashSet<ServiceComponentHostResponse>();
+ new HashSet<>();
boolean checkDesiredState = false;
State desiredStateToCheck = null;
@@ -1237,7 +1230,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
for (Service s : services) {
// filter on component name if provided
- Set<ServiceComponent> components = new HashSet<ServiceComponent>();
+ Set<ServiceComponent> components = new HashSet<>();
if (request.getComponentName() != null) {
components.add(s.getServiceComponent(request.getComponentName()));
} else {
@@ -1402,7 +1395,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
Cluster cluster = clusters.getCluster(request.getClusterName());
- Set<ConfigurationResponse> responses = new HashSet<ConfigurationResponse>();
+ Set<ConfigurationResponse> responses = new HashSet<>();
// !!! if only one, then we need full properties
if (null != request.getType() && null != request.getVersionTag()) {
@@ -1491,7 +1484,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
// kerberos_admin/principal
// kerberos_admin/password
if((sessionAttributes != null) && !sessionAttributes.isEmpty()) {
- Map<String, Object> cleanedSessionAttributes = new HashMap<String, Object>();
+ Map<String, Object> cleanedSessionAttributes = new HashMap<>();
String principal = null;
char[] password = null;
@@ -1562,7 +1555,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
//save data to return configurations created
List<ConfigurationResponse> configurationResponses =
- new LinkedList<ConfigurationResponse>();
+ new LinkedList<>();
ServiceConfigVersionResponse serviceConfigVersionResponse = null;
if (request.getDesiredConfig() != null && request.getServiceConfigVersionRequest() != null) {
@@ -1667,7 +1660,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
List<ConfigurationRequest> desiredConfigs = request.getDesiredConfig();
if (!desiredConfigs.isEmpty()) {
- Set<Config> configs = new HashSet<Config>();
+ Set<Config> configs = new HashSet<>();
String note = null;
for (ConfigurationRequest cr : desiredConfigs) {
@@ -1796,7 +1789,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
new ClusterResponse(cluster.getClusterId(), cluster.getClusterName(), null, null, null, null, null, null);
Map<String, Collection<ServiceConfigVersionResponse>> map =
- new HashMap<String, Collection<ServiceConfigVersionResponse>>();
+ new HashMap<>();
map.put(serviceConfigVersionResponse.getServiceName(), Collections.singletonList(serviceConfigVersionResponse));
clusterResponse.setDesiredServiceConfigVersions(map);
@@ -1888,7 +1881,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
* @return a map of property names to String arrays indicating the requested changes ({current value, requested value})
*/
private Map<String, String[]> getPropertyChanges(Cluster cluster, ConfigurationRequest request) {
- Map<String, String[]> changedProperties = new HashMap<String, String[]>();
+ Map<String, String[]> changedProperties = new HashMap<>();
// Ensure that the requested property map is not null.
Map<String, String> requestedProperties = request.getProperties();
@@ -1905,7 +1898,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
// Ensure all property names are captured, including missing ones from either set.
- Set<String> propertyNames = new HashSet<String>();
+ Set<String> propertyNames = new HashSet<>();
propertyNames.addAll(requestedProperties.keySet());
propertyNames.addAll(existingProperties.keySet());
@@ -2003,7 +1996,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
// are not in a Maintenance state.
Resource.Type opLvl = Resource.Type.Cluster;
- Set<String> smokeTestServices = new HashSet<String>();
+ Set<String> smokeTestServices = new HashSet<>();
// Adding smoke checks for changed services
if (changedServices != null) {
@@ -2022,7 +2015,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
// Adding smoke checks for changed host components
Map<String, Map<String, Integer>> changedComponentCount =
- new HashMap<String, Map<String, Integer>>();
+ new HashMap<>();
for (Map<State, List<ServiceComponentHost>> stateScHostMap :
changedScHosts.values()) {
for (Entry<State, List<ServiceComponentHost>> entry :
@@ -2092,7 +2085,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
Map<String, Map<State, List<ServiceComponentHost>>> changedScHosts)
throws AmbariException {
- Set<String> services = new HashSet<String>();
+ Set<String> services = new HashSet<>();
// This is done to account for services with client only components.
if (changedServices != null) {
@@ -2109,7 +2102,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
// Flatten changed Schs that are going to be Started
- List<ServiceComponentHost> serviceComponentHosts = new ArrayList<ServiceComponentHost>();
+ List<ServiceComponentHost> serviceComponentHosts = new ArrayList<>();
if (changedScHosts != null && !changedScHosts.isEmpty()) {
for (Entry<String, Map<State, List<ServiceComponentHost>>> stringMapEntry : changedScHosts.entrySet()) {
for (State state : stringMapEntry.getValue().keySet()) {
@@ -2130,12 +2123,12 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
return;
}
- Map<String, List<ServiceComponentHost>> clientSchs = new HashMap<String, List<ServiceComponentHost>>();
+ Map<String, List<ServiceComponentHost>> clientSchs = new HashMap<>();
for (String serviceName : services) {
Service s = cluster.getService(serviceName);
for (String component : s.getServiceComponents().keySet()) {
- List<ServiceComponentHost> potentialHosts = new ArrayList<ServiceComponentHost>();
+ List<ServiceComponentHost> potentialHosts = new ArrayList<>();
ServiceComponent sc = s.getServiceComponents().get(component);
if (sc.isClientComponent()) {
for (ServiceComponentHost potentialSch : sc.getServiceComponentHosts().values()) {
@@ -2157,7 +2150,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
if (changedScHosts != null) {
for (Entry<String, List<ServiceComponentHost>> stringListEntry : clientSchs.entrySet()) {
- Map<State, List<ServiceComponentHost>> schMap = new EnumMap<State, List<ServiceComponentHost>>(State.class);
+ Map<State, List<ServiceComponentHost>> schMap = new EnumMap<>(State.class);
schMap.put(State.INSTALLED, stringListEntry.getValue());
changedScHosts.put(stringListEntry.getKey(), schMap);
}
@@ -2238,7 +2231,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
execCmd.setConfigurationCredentials(configCredentials);
// Create a local copy for each command
- Map<String, String> commandParams = new TreeMap<String, String>();
+ Map<String, String> commandParams = new TreeMap<>();
if (commandParamsInp != null) { // if not defined
commandParams.putAll(commandParamsInp);
}
@@ -2350,8 +2343,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
commandParams.put(ExecutionCommand.KeyNames.REFRESH_TOPOLOGY, "True");
}
- execCmd.setCommandParams(commandParams);
-
String repoInfo = customCommandExecutionHelper.getRepoInfo(cluster, host);
if (LOG.isDebugEnabled()) {
LOG.debug("Sending repo information to agent"
@@ -2361,7 +2352,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
+ ", repoInfo=" + repoInfo);
}
- Map<String, String> hostParams = new TreeMap<String, String>();
+ Map<String, String> hostParams = new TreeMap<>();
hostParams.put(REPO_INFO, repoInfo);
hostParams.putAll(getRcaParameters());
@@ -2422,7 +2413,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
List<String> clientsToUpdateConfigsList = componentInfo.getClientsToUpdateConfigs();
if (clientsToUpdateConfigsList == null) {
- clientsToUpdateConfigsList = new ArrayList<String>();
+ clientsToUpdateConfigsList = new ArrayList<>();
clientsToUpdateConfigsList.add("*");
}
@@ -2430,16 +2421,18 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
hostParams.put(CLIENTS_TO_UPDATE_CONFIGS, clientsToUpdateConfigs);
execCmd.setHostLevelParams(hostParams);
- Map<String, String> roleParams = new TreeMap<String, String>();
+ Map<String, String> roleParams = new TreeMap<>();
// !!! consistent with where custom commands put variables
// !!! after-INSTALL hook checks this such that the stack selection tool won't
// select-all to a version that is not being upgraded, breaking RU
if (cluster.isUpgradeSuspended()) {
- roleParams.put(KeyNames.UPGRADE_SUSPENDED, Boolean.TRUE.toString().toLowerCase());
+ cluster.addSuspendedUpgradeParameters(commandParams, roleParams);
}
+
execCmd.setRoleParams(roleParams);
+ execCmd.setCommandParams(commandParams);
execCmd.setAvailableServicesFromServiceInfoMap(ambariMetaInfo.getServices(stackId.getStackName(), stackId.getStackVersion()));
@@ -2489,7 +2482,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
// Build package list that is relevant for host
List<ServiceOsSpecific.Package> packages =
- new ArrayList<ServiceOsSpecific.Package>();
+ new ArrayList<>();
if (anyOs != null) {
packages.addAll(anyOs.getPackages());
}
@@ -2502,7 +2495,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
private List<ServiceOsSpecific> getOSSpecificsByFamily(Map<String, ServiceOsSpecific> osSpecifics, String osFamily) {
- List<ServiceOsSpecific> foundedOSSpecifics = new ArrayList<ServiceOsSpecific>();
+ List<ServiceOsSpecific> foundedOSSpecifics = new ArrayList<>();
for (Entry<String, ServiceOsSpecific> osSpecific : osSpecifics.entrySet()) {
if (osSpecific.getKey().contains(osFamily)) {
foundedOSSpecifics.add(osSpecific.getValue());
@@ -2675,14 +2668,14 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
// so kerberosHelper.configureServices know which to work on. Null indicates no filter
// and all services and components will be (re)configured, however null will not be
// passed in from here.
- Map<String, Collection<String>> serviceFilter = new HashMap<String, Collection<String>>();
+ Map<String, Collection<String>> serviceFilter = new HashMap<>();
for (ServiceComponentHost scHost : componentsToConfigureForKerberos) {
String serviceName = scHost.getServiceName();
Collection<String> componentFilter = serviceFilter.get(serviceName);
if (componentFilter == null) {
- componentFilter = new HashSet<String>();
+ componentFilter = new HashSet<>();
serviceFilter.put(serviceName, componentFilter);
}
@@ -2872,22 +2865,22 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
if (null == requestParameters) {
- requestParameters = new HashMap<String, String>();
+ requestParameters = new HashMap<>();
}
requestParameters.put(keyName, requestProperties.get(keyName));
}
if (requestProperties.containsKey(CLUSTER_PHASE_PROPERTY)) {
if (null == requestParameters) {
- requestParameters = new HashMap<String, String>();
+ requestParameters = new HashMap<>();
}
requestParameters.put(CLUSTER_PHASE_PROPERTY, requestProperties.get(CLUSTER_PHASE_PROPERTY));
}
- Map<String, Map<String, String>> configurations = new TreeMap<String, Map<String, String>>();
+ Map<String, Map<String, String>> configurations = new TreeMap<>();
Map<String, Map<String, Map<String, String>>>
configurationAttributes =
- new TreeMap<String, Map<String, Map<String, String>>>();
+ new TreeMap<>();
Host host = clusters.getHost(scHost.getHostName());
Map<String, Map<String, String>> configTags =
@@ -2942,15 +2935,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
requestStages.addStages(rg.getStages());
if (!componentsToEnableKerberos.isEmpty()) {
- Map<String, Collection<String>> serviceFilter = new HashMap<String, Collection<String>>();
- Set<String> hostFilter = new HashSet<String>();
+ Map<String, Collection<String>> serviceFilter = new HashMap<>();
+ Set<String> hostFilter = new HashSet<>();
for (ServiceComponentHost scHost : componentsToEnableKerberos) {
String serviceName = scHost.getServiceName();
Collection<String> componentFilter = serviceFilter.get(serviceName);
if (componentFilter == null) {
- componentFilter = new HashSet<String>();
+ componentFilter = new HashSet<>();
serviceFilter.put(serviceName, componentFilter);
}
@@ -3034,7 +3027,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
Map<String, Map<String, Map<String, String>>>
configurationAttributes =
- new TreeMap<String, Map<String, Map<String, String>>>();
+ new TreeMap<>();
createHostAction(cluster, stage, scHost, configurations, configurationAttributes, configTags,
roleCommand, null, null, false);
@@ -3080,7 +3073,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
public Set<StackConfigurationDependencyResponse> getStackConfigurationDependencies(
Set<StackConfigurationDependencyRequest> requests) throws AmbariException {
Set<StackConfigurationDependencyResponse> response
- = new HashSet<StackConfigurationDependencyResponse>();
+ = new HashSet<>();
if (requests != null) {
for (StackConfigurationDependencyRequest request : requests) {
@@ -3106,7 +3099,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
private Set<StackConfigurationDependencyResponse> getStackConfigurationDependencies(StackConfigurationDependencyRequest request) throws AmbariException {
Set<StackConfigurationDependencyResponse> response =
- new HashSet<StackConfigurationDependencyResponse>();
+ new HashSet<>();
String stackName = request.getStackName();
String stackVersion = request.getStackVersion();
@@ -3479,11 +3472,11 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
//Clear exclude file or draining list except HBASE
if (!serviceName.equals(Service.Type.HBASE.toString())) {
- HashMap<String, String> requestProperties = new HashMap<String, String>();
+ HashMap<String, String> requestProperties = new HashMap<>();
requestProperties.put("context", "Remove host " +
included_hostname + " from exclude file");
requestProperties.put("exclusive", "true");
- HashMap<String, String> params = new HashMap<String, String>();
+ HashMap<String, String> params = new HashMap<>();
params.put("included_hosts", included_hostname);
params.put("slave_type", slave_component_name);
params.put(AmbariCustomCommandExecutionHelper.UPDATE_EXCLUDE_FILE_ONLY, "true");
@@ -3558,7 +3551,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
actionManager.getRequestTasks(requestId);
response.setRequestContext(actionManager.getRequestContext(requestId));
- List<ShortTaskStatus> tasks = new ArrayList<ShortTaskStatus>();
+ List<ShortTaskStatus> tasks = new ArrayList<>();
for (HostRoleCommand hostRoleCommand : hostRoleCommands) {
tasks.add(new ShortTaskStatus(hostRoleCommand));
@@ -3570,7 +3563,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<ClusterResponse> getClusters(Set<ClusterRequest> requests) throws AmbariException, AuthorizationException {
- Set<ClusterResponse> response = new HashSet<ClusterResponse>();
+ Set<ClusterResponse> response = new HashSet<>();
for (ClusterRequest request : requests) {
try {
response.addAll(getClusters(request));
@@ -3590,7 +3583,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
Set<ServiceComponentHostRequest> requests) throws AmbariException {
LOG.debug("Processing requests: {}", requests);
Set<ServiceComponentHostResponse> response =
- new HashSet<ServiceComponentHostResponse>();
+ new HashSet<>();
for (ServiceComponentHostRequest request : requests) {
try {
response.addAll(getHostComponents(request));
@@ -3651,7 +3644,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
public Set<ConfigurationResponse> getConfigurations(
Set<ConfigurationRequest> requests) throws AmbariException {
Set<ConfigurationResponse> response =
- new HashSet<ConfigurationResponse>();
+ new HashSet<>();
for (ConfigurationRequest request : requests) {
response.addAll(getConfigurations(request));
}
@@ -3661,7 +3654,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<ServiceConfigVersionResponse> getServiceConfigVersions(Set<ServiceConfigVersionRequest> requests)
throws AmbariException {
- Set<ServiceConfigVersionResponse> responses = new LinkedHashSet<ServiceConfigVersionResponse>();
+ Set<ServiceConfigVersionResponse> responses = new LinkedHashSet<>();
for (ServiceConfigVersionRequest request : requests) {
responses.addAll(getServiceConfigVersions(request));
@@ -3679,9 +3672,9 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
Cluster cluster = clusters.getCluster(request.getClusterName());
- Set<ServiceConfigVersionResponse> result = new LinkedHashSet<ServiceConfigVersionResponse>();
+ Set<ServiceConfigVersionResponse> result = new LinkedHashSet<>();
String serviceName = request.getServiceName();
- List<ServiceConfigVersionResponse> serviceConfigVersionResponses = new ArrayList<ServiceConfigVersionResponse>();
+ List<ServiceConfigVersionResponse> serviceConfigVersionResponses = new ArrayList<>();
if (Boolean.TRUE.equals(request.getIsCurrent()) && serviceName != null) {
serviceConfigVersionResponses.addAll(cluster.getActiveServiceConfigVersionResponse(serviceName));
@@ -3709,7 +3702,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
public Set<UserResponse> getUsers(Set<UserRequest> requests)
throws AmbariException, AuthorizationException {
- Set<UserResponse> responses = new HashSet<UserResponse>();
+ Set<UserResponse> responses = new HashSet<>();
for (UserRequest r : requests) {
@@ -3739,7 +3732,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
for (User u : users.getAllUsers()) {
UserResponse resp = new UserResponse(u.getUserName(), u.getUserType(), u.isLdapUser(), u.isActive(), u
.isAdmin());
- resp.setGroups(new HashSet<String>(u.getGroups()));
+ resp.setGroups(new HashSet<>(u.getGroups()));
responses.add(resp);
}
} else {
@@ -3755,7 +3748,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
} else {
UserResponse resp = new UserResponse(u.getUserName(), u.getUserType(), u.isLdapUser(), u.isActive(), u
.isAdmin());
- resp.setGroups(new HashSet<String>(u.getGroups()));
+ resp.setGroups(new HashSet<>(u.getGroups()));
responses.add(resp);
}
}
@@ -3767,7 +3760,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<GroupResponse> getGroups(Set<GroupRequest> requests)
throws AmbariException {
- final Set<GroupResponse> responses = new HashSet<GroupResponse>();
+ final Set<GroupResponse> responses = new HashSet<>();
for (GroupRequest request: requests) {
LOG.debug("Received a getGroups request, groupRequest=" + request.toString());
// get them all
@@ -4039,7 +4032,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<StackResponse> getStacks(Set<StackRequest> requests)
throws AmbariException {
- Set<StackResponse> response = new HashSet<StackResponse>();
+ Set<StackResponse> response = new HashSet<>();
for (StackRequest request : requests) {
try {
response.addAll(getStacks(request));
@@ -4068,7 +4061,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
response = Collections.singleton(new StackResponse(stackName));
} else {
Collection<StackInfo> supportedStacks = ambariMetaInfo.getStacks();
- response = new HashSet<StackResponse>();
+ response = new HashSet<>();
for (StackInfo stack: supportedStacks) {
response.add(new StackResponse(stack.getName()));
}
@@ -4094,7 +4087,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<ExtensionResponse> getExtensions(Set<ExtensionRequest> requests)
throws AmbariException {
- Set<ExtensionResponse> response = new HashSet<ExtensionResponse>();
+ Set<ExtensionResponse> response = new HashSet<>();
for (ExtensionRequest request : requests) {
try {
response.addAll(getExtensions(request));
@@ -4123,7 +4116,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
response = Collections.singleton(new ExtensionResponse(extensionName));
} else {
Collection<ExtensionInfo> supportedExtensions = ambariMetaInfo.getExtensions();
- response = new HashSet<ExtensionResponse>();
+ response = new HashSet<>();
for (ExtensionInfo extension: supportedExtensions) {
response.add(new ExtensionResponse(extension.getName()));
}
@@ -4134,7 +4127,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<ExtensionVersionResponse> getExtensionVersions(
Set<ExtensionVersionRequest> requests) throws AmbariException {
- Set<ExtensionVersionResponse> response = new HashSet<ExtensionVersionResponse>();
+ Set<ExtensionVersionResponse> response = new HashSet<>();
for (ExtensionVersionRequest request : requests) {
String extensionName = request.getExtensionName();
try {
@@ -4167,7 +4160,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
} else {
try {
Collection<ExtensionInfo> extensionInfos = ambariMetaInfo.getExtensions(extensionName);
- response = new HashSet<ExtensionVersionResponse>();
+ response = new HashSet<>();
for (ExtensionInfo extensionInfo: extensionInfos) {
response.add(extensionInfo.convertToResponse());
}
@@ -4182,7 +4175,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<RepositoryResponse> getRepositories(Set<RepositoryRequest> requests)
throws AmbariException {
- Set<RepositoryResponse> response = new HashSet<RepositoryResponse>();
+ Set<RepositoryResponse> response = new HashSet<>();
for (RepositoryRequest request : requests) {
try {
String stackName = request.getStackName();
@@ -4230,7 +4223,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
}
- Set<RepositoryResponse> responses = new HashSet<RepositoryResponse>();
+ Set<RepositoryResponse> responses = new HashSet<>();
if (repositoryVersionId != null) {
final RepositoryVersionEntity repositoryVersion = repositoryVersionDAO.findByPK(repositoryVersionId);
@@ -4411,7 +4404,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<StackVersionResponse> getStackVersions(
Set<StackVersionRequest> requests) throws AmbariException {
- Set<StackVersionResponse> response = new HashSet<StackVersionResponse>();
+ Set<StackVersionResponse> response = new HashSet<>();
for (StackVersionRequest request : requests) {
String stackName = request.getStackName();
try {
@@ -4445,7 +4438,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
} else {
try {
Collection<StackInfo> stackInfos = ambariMetaInfo.getStacks(stackName);
- response = new HashSet<StackVersionResponse>();
+ response = new HashSet<>();
for (StackInfo stackInfo: stackInfos) {
response.add(stackInfo.convertToResponse());
}
@@ -4461,7 +4454,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
public Set<StackServiceResponse> getStackServices(
Set<StackServiceRequest> requests) throws AmbariException {
- Set<StackServiceResponse> response = new HashSet<StackServiceResponse>();
+ Set<StackServiceResponse> response = new HashSet<>();
for (StackServiceRequest request : requests) {
String stackName = request.getStackName();
@@ -4500,7 +4493,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
response = Collections.singleton(new StackServiceResponse(service));
} else {
Map<String, ServiceInfo> services = ambariMetaInfo.getServices(stackName, stackVersion);
- response = new HashSet<StackServiceResponse>();
+ response = new HashSet<>();
for (ServiceInfo service : services.values()) {
response.add(new StackServiceResponse(service));
}
@@ -4511,7 +4504,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<StackConfigurationResponse> getStackLevelConfigurations(
Set<StackLevelConfigurationRequest> requests) throws AmbariException {
- Set<StackConfigurationResponse> response = new HashSet<StackConfigurationResponse>();
+ Set<StackConfigurationResponse> response = new HashSet<>();
for (StackLevelConfigurationRequest request : requests) {
String stackName = request.getStackName();
@@ -4533,7 +4526,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
private Set<StackConfigurationResponse> getStackLevelConfigurations(
StackLevelConfigurationRequest request) throws AmbariException {
- Set<StackConfigurationResponse> response = new HashSet<StackConfigurationResponse>();
+ Set<StackConfigurationResponse> response = new HashSet<>();
String stackName = request.getStackName();
String stackVersion = request.getStackVersion();
@@ -4555,7 +4548,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<StackConfigurationResponse> getStackConfigurations(
Set<StackConfigurationRequest> requests) throws AmbariException {
- Set<StackConfigurationResponse> response = new HashSet<StackConfigurationResponse>();
+ Set<StackConfigurationResponse> response = new HashSet<>();
for (StackConfigurationRequest request : requests) {
String stackName = request.getStackName();
@@ -4579,7 +4572,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
private Set<StackConfigurationResponse> getStackConfigurations(
StackConfigurationRequest request) throws AmbariException {
- Set<StackConfigurationResponse> response = new HashSet<StackConfigurationResponse>();
+ Set<StackConfigurationResponse> response = new HashSet<>();
String stackName = request.getStackName();
String stackVersion = request.getStackVersion();
@@ -4602,7 +4595,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<StackServiceComponentResponse> getStackComponents(
Set<StackServiceComponentRequest> requests) throws AmbariException {
- Set<StackServiceComponentResponse> response = new HashSet<StackServiceComponentResponse>();
+ Set<StackServiceComponentResponse> response = new HashSet<>();
for (StackServiceComponentRequest request : requests) {
String stackName = request.getStackName();
String stackVersion = request.getStackVersion();
@@ -4646,7 +4639,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
} else {
List<ComponentInfo> components = ambariMetaInfo.getComponentsByService(stackName, stackVersion, serviceName);
- response = new HashSet<StackServiceComponentResponse>();
+ response = new HashSet<>();
for (ComponentInfo component: components) {
response.add(new StackServiceComponentResponse(component));
@@ -4658,7 +4651,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<OperatingSystemResponse> getOperatingSystems(
Set<OperatingSystemRequest> requests) throws AmbariException {
- Set<OperatingSystemResponse> response = new HashSet<OperatingSystemResponse>();
+ Set<OperatingSystemResponse> response = new HashSet<>();
for (OperatingSystemRequest request : requests) {
try {
String stackName = request.getStackName();
@@ -4689,7 +4682,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
private Set<OperatingSystemResponse> getOperatingSystems(
OperatingSystemRequest request) throws AmbariException {
- Set<OperatingSystemResponse> responses = new HashSet<OperatingSystemResponse>();
+ Set<OperatingSystemResponse> responses = new HashSet<>();
String stackName = request.getStackName();
String stackVersion = request.getStackVersion();
@@ -4767,7 +4760,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<RootServiceResponse> getRootServices(
Set<RootServiceRequest> requests) throws AmbariException {
- Set<RootServiceResponse> response = new HashSet<RootServiceResponse>();
+ Set<RootServiceResponse> response = new HashSet<>();
for (RootServiceRequest request : requests) {
try {
response.addAll(getRootServices(request));
@@ -4790,7 +4783,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Override
public Set<RootServiceComponentResponse> getRootServiceComponents(
Set<RootServiceComponentRequest> requests) throws AmbariException {
- Set<RootServiceComponentResponse> response = new HashSet<RootServiceComponentResponse>();
+ Set<RootServiceComponentResponse> response = new HashSet<>();
for (RootServiceComponentRequest request : requests) {
String serviceName = request.getServiceName();
try {
@@ -4905,7 +4898,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
hostsMap.getHostMap(hostName));
}
- Map<String, String> rcaParameters = new HashMap<String, String>();
+ Map<String, String> rcaParameters = new HashMap<>();
rcaParameters.put(AMBARI_DB_RCA_URL, url);
rcaParameters.put(AMBARI_DB_RCA_DRIVER, configs.getRcaDatabaseDriver());
@@ -5088,7 +5081,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
layoutEntity.setScope(WidgetLayoutResourceProvider.SCOPE.CLUSTER.name());
layoutEntity.setUserName(user);
- List<WidgetLayoutUserWidgetEntity> widgetLayoutUserWidgetEntityList = new LinkedList<WidgetLayoutUserWidgetEntity>();
+ List<WidgetLayoutUserWidgetEntity> widgetLayoutUserWidgetEntityList = new LinkedList<>();
int order = 0;
for (WidgetLayoutInfo layoutInfo : widgetLayout.getWidgetLayoutInfoList()) {
if (layoutInfo.getDefaultSectionName() == null) {
@@ -5119,7 +5112,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
// Add new widgets to end of the existing ones
List<WidgetLayoutUserWidgetEntity> layoutUserWidgetEntities = existingLayoutEntity.getListWidgetLayoutUserWidgetEntity();
if (layoutUserWidgetEntities == null) {
- layoutUserWidgetEntities = new LinkedList<WidgetLayoutUserWidgetEntity>();
+ layoutUserWidgetEntities = new LinkedList<>();
existingLayoutEntity.setListWidgetLayoutUserWidgetEntity(layoutUserWidgetEntities);
}
int order = layoutUserWidgetEntities.size() - 1;
@@ -5196,7 +5189,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
* @return a map of properties
*/
public Map<String,String> getCredentialStoreServiceProperties() {
- Map<String,String> properties = new HashMap<String, String>();
+ Map<String,String> properties = new HashMap<>();
properties.put("storage.persistent", String.valueOf(credentialStoreService.isInitialized(CredentialStoreType.PERSISTED)));
properties.put("storage.temporary", String.valueOf(credentialStoreService.isInitialized(CredentialStoreType.TEMPORARY)));
return properties;
@@ -5235,7 +5228,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
Map<PropertyInfo.PropertyType, Set<String>> propertyTypes = cluster.getConfigPropertiesTypes(configType);
// Create a composite set of properties to check...
- Set<String> propertiesToCheck = new HashSet<String>();
+ Set<String> propertiesToCheck = new HashSet<>();
Set<String> userProperties = propertyTypes.get(PropertyType.USER);
if (userProperties != null) {
@@ -5330,10 +5323,10 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
Map<String, String[]> relevantPropertyChanges;
// If necessary remove any non-relevant property changes.
- if (relevantChangesToIgnore == null)
+ if (relevantChangesToIgnore == null) {
relevantPropertyChanges = propertyChanges;
- else {
- relevantPropertyChanges = new HashMap<String, String[]>(propertyChanges);
+ } else {
+ relevantPropertyChanges = new HashMap<>(propertyChanges);
for (String propertyName : relevantChangesToIgnore) {
relevantPropertyChanges.remove(propertyName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 2ec43cf..9ecb774 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -19,7 +19,6 @@ package org.apache.ambari.server.controller.internal;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.VERSION;
import java.text.MessageFormat;
import java.util.ArrayList;
@@ -128,7 +127,6 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
-import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.inject.Inject;
import com.google.inject.Provider;
@@ -221,30 +219,6 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
Arrays.asList(UPGRADE_REQUEST_ID, UPGRADE_CLUSTER_NAME));
private static final Set<String> PROPERTY_IDS = new HashSet<>();
- private static final String COMMAND_PARAM_VERSION = VERSION;
- private static final String COMMAND_PARAM_CLUSTER_NAME = "clusterName";
- private static final String COMMAND_PARAM_DIRECTION = "upgrade_direction";
- private static final String COMMAND_PARAM_UPGRADE_PACK = "upgrade_pack";
- private static final String COMMAND_PARAM_REQUEST_ID = "request_id";
-
- private static final String COMMAND_PARAM_UPGRADE_TYPE = "upgrade_type";
- private static final String COMMAND_PARAM_TASKS = "tasks";
- private static final String COMMAND_PARAM_STRUCT_OUT = "structured_out";
- private static final String COMMAND_DOWNGRADE_FROM_VERSION = "downgrade_from_version";
-
- /**
- * The original "current" stack of the cluster before the upgrade started.
- * This is the same regardless of whether the current direction is
- * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
- */
- private static final String COMMAND_PARAM_ORIGINAL_STACK = "original_stack";
-
- /**
- * The target upgrade stack before the upgrade started. This is the same
- * regardless of whether the current direction is {@link Direction#UPGRADE} or
- * {@link Direction#DOWNGRADE}.
- */
- private static final String COMMAND_PARAM_TARGET_STACK = "target_stack";
private static final String DEFAULT_REASON_TEMPLATE = "Aborting upgrade %s";
@@ -395,8 +369,11 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
}
}
+ // the version being upgraded or downgraded to (ie 2.2.1.0-1234)
+ final String version = (String) requestMap.get(UPGRADE_VERSION);
+
final UpgradeContext upgradeContext = s_upgradeContextFactory.create(cluster, upgradeType,
- direction, requestMap);
+ direction, version, requestMap);
UpgradePack upgradePack = validateRequest(upgradeContext);
upgradeContext.setUpgradePack(upgradePack);
@@ -431,7 +408,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
public Set<Resource> getResources(Request request, Predicate predicate) throws SystemException,
UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
- Set<Resource> results = new HashSet<Resource>();
+ Set<Resource> results = new HashSet<>();
Set<String> requestPropertyIds = getRequestPropertyIds(request, predicate);
for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
@@ -450,7 +427,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
String.format("Cluster %s could not be loaded", clusterName));
}
- List<UpgradeEntity> upgrades = new ArrayList<UpgradeEntity>();
+ List<UpgradeEntity> upgrades = new ArrayList<>();
String upgradeIdStr = (String) propertyMap.get(UPGRADE_REQUEST_ID);
if (null != upgradeIdStr) {
@@ -728,7 +705,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
String userName = getManagementController().getAuthName();
// the version being upgraded or downgraded to (ie 2.2.1.0-1234)
- final String version = (String) requestMap.get(UPGRADE_VERSION);
+ final String version = upgradeContext.getVersion();
MasterHostResolver resolver = null;
if (direction.isUpgrade()) {
@@ -737,34 +714,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
resolver = new MasterHostResolver(configHelper, cluster, version);
}
- StackId sourceStackId = null;
- StackId targetStackId = null;
-
Set<String> supportedServices = new HashSet<>();
UpgradeScope scope = UpgradeScope.COMPLETE;
- switch (direction) {
- case UPGRADE:
- sourceStackId = cluster.getCurrentStackVersion();
-
- RepositoryVersionEntity targetRepositoryVersion = s_repoVersionDAO.findByStackNameAndVersion(
- sourceStackId.getStackName(), version);
-
- // !!! TODO check the repo_version for patch-ness and restrict the context
- // to those services that require it. Consult the version definition and add the
- // service names to supportedServices
-
- targetStackId = targetRepositoryVersion.getStackId();
- break;
- case DOWNGRADE:
- sourceStackId = cluster.getCurrentStackVersion();
- targetStackId = cluster.getDesiredStackVersion();
- break;
- }
-
upgradeContext.setResolver(resolver);
- upgradeContext.setSourceAndTargetStacks(sourceStackId, targetStackId);
- upgradeContext.setVersion(version);
upgradeContext.setSupportedServices(supportedServices);
upgradeContext.setScope(scope);
@@ -835,6 +788,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
List<UpgradeGroupEntity> groupEntities = new ArrayList<>();
RequestStageContainer req = createRequest(direction, version);
+ // the upgrade context calculated these for us based on direction
+ StackId sourceStackId = upgradeContext.getOriginalStackId();
+ StackId targetStackId = upgradeContext.getTargetStackId();
+
/**
During a Rolling Upgrade, change the desired Stack Id if jumping across
major stack versions (e.g., HDP 2.2 -> 2.3), and then set config changes
@@ -1087,9 +1044,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
// We want to skip updating config-types of services that are not in the upgrade pack.
// Care should be taken as some config-types could be in services that are in and out
// of the upgrade pack. We should never ignore config-types of services in upgrade pack.
- Set<String> skipConfigTypes = new HashSet<String>();
- Set<String> upgradePackServices = new HashSet<String>();
- Set<String> upgradePackConfigTypes = new HashSet<String>();
+ Set<String> skipConfigTypes = new HashSet<>();
+ Set<String> upgradePackServices = new HashSet<>();
+ Set<String> upgradePackConfigTypes = new HashSet<>();
AmbariMetaInfo ambariMetaInfo = s_metaProvider.get();
Map<String, ServiceInfo> stackServicesMap = ambariMetaInfo.getServices(targetStack.getStackName(), targetStack.getStackVersion());
for (Grouping group : upgradePack.getGroups(direction)) {
@@ -1111,7 +1068,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
}
}
}
- Set<String> servicesNotInUpgradePack = new HashSet<String>(stackServicesMap.keySet());
+ Set<String> servicesNotInUpgradePack = new HashSet<>(stackServicesMap.keySet());
servicesNotInUpgradePack.removeAll(upgradePackServices);
for (String serviceNotInUpgradePack : servicesNotInUpgradePack) {
ServiceInfo serviceInfo = stackServicesMap.get(serviceNotInUpgradePack);
@@ -1321,16 +1278,11 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
// add each host to this stage
RequestResourceFilter filter = new RequestResourceFilter("", "",
- new ArrayList<String>(wrapper.getHosts()));
+ new ArrayList<>(wrapper.getHosts()));
LOG.debug("Analyzing upgrade item {} with tasks: {}.", entity.getText(), entity.getTasks());
- Map<String, String> params = getNewParameterMap(request);
- params.put(COMMAND_PARAM_TASKS, entity.getTasks());
- params.put(COMMAND_PARAM_VERSION, context.getVersion());
- params.put(COMMAND_PARAM_DIRECTION, context.getDirection().name().toLowerCase());
- params.put(COMMAND_PARAM_ORIGINAL_STACK, context.getOriginalStackId().getStackId());
- params.put(COMMAND_PARAM_TARGET_STACK, context.getTargetStackId().getStackId());
- params.put(COMMAND_DOWNGRADE_FROM_VERSION, context.getDowngradeFromVersion());
+ Map<String, String> params = getNewParameterMap(request, context);
+ params.put(UpgradeContext.COMMAND_PARAM_TASKS, entity.getTasks());
// Apply additional parameters to the command that come from the stage.
applyAdditionalParameters(wrapper, params);
@@ -1411,12 +1363,12 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
Cluster cluster = context.getCluster();
- List<RequestResourceFilter> filters = new ArrayList<RequestResourceFilter>();
+ List<RequestResourceFilter> filters = new ArrayList<>();
for (TaskWrapper tw : wrapper.getTasks()) {
// add each host to this stage
filters.add(new RequestResourceFilter(tw.getService(), tw.getComponent(),
- new ArrayList<String>(tw.getHosts())));
+ new ArrayList<>(tw.getHosts())));
}
String function = null;
@@ -1432,20 +1384,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
break;
}
- Map<String, String> commandParams = getNewParameterMap(request);
- if (null != context.getType()) {
- // use the serialized attributes of the enum to convert it to a string,
- // but first we must convert it into an element so that we don't get a
- // quoted string - using toString() actually returns a quoted stirng which is bad
- JsonElement json = s_gson.toJsonTree(context.getType());
- commandParams.put(COMMAND_PARAM_UPGRADE_TYPE, json.getAsString());
- }
-
- commandParams.put(COMMAND_PARAM_VERSION, context.getVersion());
- commandParams.put(COMMAND_PARAM_DIRECTION, context.getDirection().name().toLowerCase());
- commandParams.put(COMMAND_PARAM_ORIGINAL_STACK, context.getOriginalStackId().getStackId());
- commandParams.put(COMMAND_PARAM_TARGET_STACK, context.getTargetStackId().getStackId());
- commandParams.put(COMMAND_DOWNGRADE_FROM_VERSION, context.getDowngradeFromVersion());
+ Map<String, String> commandParams = getNewParameterMap(request, context);
// Apply additional parameters to the command that come from the stage.
applyAdditionalParameters(wrapper, commandParams);
@@ -1478,7 +1417,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
stage.setStageId(stageId);
entity.setStageId(Long.valueOf(stageId));
- Map<String, String> requestParams = new HashMap<String, String>();
+ Map<String, String> requestParams = new HashMap<>();
requestParams.put("command", function);
// !!! it is unclear the implications of this on rolling or express upgrade. To turn
@@ -1496,7 +1435,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
boolean supportsAutoSkipOnFailure, boolean allowRetry)
throws AmbariException {
- List<RequestResourceFilter> filters = new ArrayList<RequestResourceFilter>();
+ List<RequestResourceFilter> filters = new ArrayList<>();
for (TaskWrapper tw : wrapper.getTasks()) {
filters.add(new RequestResourceFilter(tw.getService(), "", Collections.<String> emptyList()));
@@ -1504,12 +1443,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
Cluster cluster = context.getCluster();
- Map<String, String> commandParams = getNewParameterMap(request);
- commandParams.put(COMMAND_PARAM_VERSION, context.getVersion());
- commandParams.put(COMMAND_PARAM_DIRECTION, context.getDirection().name().toLowerCase());
- commandParams.put(COMMAND_PARAM_ORIGINAL_STACK, context.getOriginalStackId().getStackId());
- commandParams.put(COMMAND_PARAM_TARGET_STACK, context.getTargetStackId().getStackId());
- commandParams.put(COMMAND_DOWNGRADE_FROM_VERSION, context.getDowngradeFromVersion());
+ Map<String, String> commandParams = getNewParameterMap(request, context);
// Apply additional parameters to the command that come from the stage.
applyAdditionalParameters(wrapper, commandParams);
@@ -1544,7 +1478,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
stage.setStageId(stageId);
entity.setStageId(Long.valueOf(stageId));
- Map<String, String> requestParams = getNewParameterMap(request);
+ Map<String, String> requestParams = getNewParameterMap(request, context);
s_commandExecutionHelper.get().addExecutionCommandsToStage(actionContext, stage, requestParams);
request.addStages(Collections.singletonList(stage));
@@ -1571,14 +1505,8 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
Cluster cluster = context.getCluster();
- Map<String, String> commandParams = getNewParameterMap(request);
- commandParams.put(COMMAND_PARAM_CLUSTER_NAME, cluster.getClusterName());
- commandParams.put(COMMAND_PARAM_VERSION, context.getVersion());
- commandParams.put(COMMAND_PARAM_DIRECTION, context.getDirection().name().toLowerCase());
- commandParams.put(COMMAND_PARAM_ORIGINAL_STACK, context.getOriginalStackId().getStackId());
- commandParams.put(COMMAND_PARAM_TARGET_STACK, context.getTargetStackId().getStackId());
- commandParams.put(COMMAND_DOWNGRADE_FROM_VERSION, context.getDowngradeFromVersion());
- commandParams.put(COMMAND_PARAM_UPGRADE_PACK, upgradePack.getName());
+ Map<String, String> commandParams = getNewParameterMap(request, context);
+ commandParams.put(UpgradeContext.COMMAND_PARAM_UPGRADE_PACK, upgradePack.getName());
// Notice that this does not apply any params because the input does not specify a stage.
// All of the other actions do use additional params.
@@ -1599,7 +1527,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
ManualTask mt = (ManualTask) task;
if (StringUtils.isNotBlank(mt.structuredOut)) {
- commandParams.put(COMMAND_PARAM_STRUCT_OUT, mt.structuredOut);
+ commandParams.put(UpgradeContext.COMMAND_PARAM_STRUCT_OUT, mt.structuredOut);
}
}
@@ -1691,9 +1619,16 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
}
/**
- * Gets a map initialized with parameters required for rolling uprgades to
- * work. The following properties are already set:
+ * Gets a map initialized with parameters required for upgrades to work. The
+ * following properties are already set:
* <ul>
+ * <li>{@link UpgradeContext#COMMAND_PARAM_CLUSTER_NAME}
+ * <li>{@link UpgradeContext#COMMAND_PARAM_VERSION}
+ * <li>{@link UpgradeContext#COMMAND_PARAM_DIRECTION}
+ * <li>{@link UpgradeContext#COMMAND_PARAM_ORIGINAL_STACK}
+ * <li>{@link UpgradeContext#COMMAND_PARAM_TARGET_STACK}
+ * <li>{@link UpgradeContext#COMMAND_DOWNGRADE_FROM_VERSION}
+ * <li>{@link UpgradeContext#COMMAND_PARAM_UPGRADE_TYPE}
* <li>{@link KeyNames#REFRESH_CONFIG_TAGS_BEFORE_EXECUTION} - necessary in
* order to have the commands contain the correct configurations. Otherwise,
* they will contain the configurations that were available at the time the
@@ -1703,12 +1638,13 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
* <li>{@link #COMMAND_PARAM_REQUEST_ID}</li> the ID of the request.
* <ul>
*
- * @return
+ * @return the initialized parameter map.
*/
- private Map<String, String> getNewParameterMap(RequestStageContainer requestStageContainer) {
- Map<String, String> parameters = new HashMap<String, String>();
- parameters.put(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION, "true");
- parameters.put(COMMAND_PARAM_REQUEST_ID, String.valueOf(requestStageContainer.getId()));
+ private Map<String, String> getNewParameterMap(RequestStageContainer requestStageContainer,
+ UpgradeContext context) {
+ Map<String, String> parameters = context.getInitializedCommandParameters();
+ parameters.put(UpgradeContext.COMMAND_PARAM_REQUEST_ID,
+ String.valueOf(requestStageContainer.getId()));
return parameters;
}
@@ -1776,7 +1712,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
}
} else {
// Status must be PENDING.
- List<Long> taskIds = new ArrayList<Long>();
+ List<Long> taskIds = new ArrayList<>();
List<HostRoleCommandEntity> hrcEntities = s_hostRoleCommandDAO.findByRequestIdAndStatuses(
requestId, Sets.newHashSet(HostRoleStatus.ABORTED, HostRoleStatus.TIMEDOUT));
@@ -2079,7 +2015,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
direction.getText(false), e));
}
- List<Resource> failedResources = new LinkedList<Resource>();
+ List<Resource> failedResources = new LinkedList<>();
if (preUpgradeCheckResources != null) {
for (Resource res : preUpgradeCheckResources) {
PrereqCheckStatus prereqCheckStatus = (PrereqCheckStatus) res.getPropertyValue(
http://git-wip-us.apache.org/repos/asf/ambari/blob/d540f943/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index 4e37c92..8074b31 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -746,4 +746,18 @@ public interface Cluster {
* @return the role command order instance (not {@code null}).
*/
RoleCommandOrder getRoleCommandOrder();
+
+ /**
+ * Adds upgrade-specific command and role parameters to the command maps if
+ * there is a suspended upgrade. If there is not a suspended upgrade, then the
+ * maps are not modified.
+ * <p/>
+ *
+ * @param commandParams
+ * the command parameter map to supplement (not {@code null}).
+ * @param roleParams
+ * the role parameter map to supplement (not {@code null}).
+ */
+ void addSuspendedUpgradeParameters(Map<String, String> commandParams,
+ Map<String, String> roleParams);
}