You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by jo...@apache.org on 2017/05/31 14:31:08 UTC
ambari git commit: AMBARI-21149 - Configurations Created During
Upgrade Must Use Correct StackId Based on Service (jonathanhurley)
Repository: ambari
Updated Branches:
refs/heads/branch-feature-AMBARI-12556 245afc1b4 -> 2892aee53
AMBARI-21149 - Configurations Created During Upgrade Must Use Correct StackId Based on Service (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2892aee5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2892aee5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2892aee5
Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 2892aee53e63077c422c4c68dd565e786d83a71d
Parents: 245afc1
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue May 30 16:28:05 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue May 30 16:28:05 2017 -0400
----------------------------------------------------------------------
.../server/controller/KerberosHelperImpl.java | 3 +-
.../UpdateKerberosConfigsServerAction.java | 5 +-
.../serveraction/upgrades/ConfigureAction.java | 32 +-
.../ambari/server/state/ConfigHelper.java | 75 +-
.../ambari/server/state/UpgradeHelper.java | 7 +-
.../state/stack/upgrade/ClusterGrouping.java | 7 +
.../server/upgrade/UpgradeCatalog200.java | 3 +-
.../server/upgrade/UpgradeCatalog240.java | 5 +-
.../StackUpgradeConfigurationMergeTest.java | 2 +-
.../UpdateKerberosConfigsServerActionTest.java | 5 +-
.../upgrades/ConfigureActionTest.java | 693 +++++++++++--------
.../upgrades/UpgradeActionTest.java | 2 +-
.../ambari/server/state/ConfigHelperTest.java | 10 +-
.../ambari/server/state/UpgradeHelperTest.java | 6 +-
.../server/upgrade/UpgradeCatalog200Test.java | 7 +-
.../server/upgrade/UpgradeCatalog240Test.java | 6 +-
16 files changed, 480 insertions(+), 388 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index db6ffc2..e1e6b4a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -336,7 +336,8 @@ public class KerberosHelperImpl implements KerberosHelper {
existingConfigurations, installedServices, serviceFilter, previouslyExistingServices, true, true);
for (Map.Entry<String, Map<String, String>> entry : updates.entrySet()) {
- configHelper.updateConfigType(cluster, ambariManagementController, entry.getKey(), entry.getValue(), null,
+ configHelper.updateConfigType(cluster, cluster.getDesiredStackVersion(),
+ ambariManagementController, entry.getKey(), entry.getValue(), null,
ambariManagementController.getAuthName(), "Enabling Kerberos for added components");
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/UpdateKerberosConfigsServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/UpdateKerberosConfigsServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/UpdateKerberosConfigsServerAction.java
index f776575..2f32312 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/UpdateKerberosConfigsServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/UpdateKerberosConfigsServerAction.java
@@ -147,9 +147,8 @@ public class UpdateKerberosConfigsServerAction extends AbstractServerAction {
}
for (String configType : configTypes) {
- configHelper.updateConfigType(cluster, controller, configType,
- propertiesToSet.get(configType),
- propertiesToRemove.get(configType),
+ configHelper.updateConfigType(cluster, cluster.getDesiredStackVersion(), controller,
+ configType, propertiesToSet.get(configType), propertiesToRemove.get(configType),
authenticatedUserName, configNote);
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
index b55c52f..17bb3f8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
@@ -34,7 +34,7 @@ import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.configuration.Configuration;
import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.controller.ConfigurationRequest;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.serveraction.ServerAction;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
@@ -45,12 +45,14 @@ import org.apache.ambari.server.state.ConfigMergeHelper.ThreeWayValue;
import org.apache.ambari.server.state.DesiredConfig;
import org.apache.ambari.server.state.PropertyInfo;
import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.ConfigurationKeyValue;
import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Insert;
import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Masked;
import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Replace;
import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Transfer;
import org.apache.ambari.server.state.stack.upgrade.ConfigureTask;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
import org.apache.ambari.server.state.stack.upgrade.PropertyKeyState;
import org.apache.ambari.server.state.stack.upgrade.TransferOperation;
import org.apache.commons.lang.StringUtils;
@@ -82,7 +84,7 @@ import com.google.inject.Provider;
* property value</li>
* </ul>
*/
-public class ConfigureAction extends AbstractServerAction {
+public class ConfigureAction extends AbstractUpgradeServerAction {
private static Logger LOG = LoggerFactory.getLogger(ConfigureAction.class);
@@ -182,9 +184,16 @@ public class ConfigureAction extends AbstractServerAction {
String clusterName = commandParameters.get("clusterName");
Cluster cluster = m_clusters.getCluster(clusterName);
+ UpgradeContext upgradeContext = getUpgradeContext(cluster);
// such as hdfs-site or hbase-env
String configType = commandParameters.get(ConfigureTask.PARAMETER_CONFIG_TYPE);
+ String serviceName = cluster.getServiceByConfigType(configType);
+
+ RepositoryVersionEntity sourceRepoVersion = upgradeContext.getSourceRepositoryVersion(serviceName);
+ RepositoryVersionEntity targetRepoVersion = upgradeContext.getTargetRepositoryVersion(serviceName);
+ StackId sourceStackId = sourceRepoVersion.getStackId();
+ StackId targetStackId = targetRepoVersion.getStackId();
// extract setters
List<ConfigurationKeyValue> keyValuePairs = Collections.emptyList();
@@ -252,13 +261,12 @@ public class ConfigureAction extends AbstractServerAction {
if (desiredConfig == null) {
throw new AmbariException("Could not find desired config type with name " + configType);
}
+
Config config = cluster.getConfig(configType, desiredConfig.getTag());
if (config == null) {
throw new AmbariException("Could not find config type with name " + configType);
}
- StackId currentStack = cluster.getCurrentStackVersion();
- StackId targetStack = cluster.getDesiredStackVersion();
StackId configStack = config.getStackId();
// !!! initial reference values
@@ -405,8 +413,8 @@ public class ConfigureAction extends AbstractServerAction {
String oldValue = base.get(key);
// !!! values are not changing, so make this a no-op
- if (null != oldValue && value.equals(oldValue)) {
- if (currentStack.equals(targetStack) && !changedValues) {
+ if (StringUtils.equals(value, oldValue)) {
+ if (sourceStackId.equals(targetStackId) && !changedValues) {
updateBufferWithMessage(outputBuffer,
MessageFormat.format(
"{0}/{1} for cluster {2} would not change, skipping setting", configType, key,
@@ -519,7 +527,7 @@ public class ConfigureAction extends AbstractServerAction {
// !!! check to see if we're going to a new stack and double check the
// configs are for the target. Then simply update the new properties instead
// of creating a whole new history record since it was already done
- if (!targetStack.equals(currentStack) && targetStack.equals(configStack)) {
+ if (!targetStackId.equals(sourceStackId) && targetStackId.equals(configStack)) {
config.setProperties(newValues);
config.save();
@@ -528,7 +536,9 @@ public class ConfigureAction extends AbstractServerAction {
// !!! values are different and within the same stack. create a new
// config and service config version
- String serviceVersionNote = "Stack Upgrade";
+ Direction direction = upgradeContext.getDirection();
+ String serviceVersionNote = String.format("%s %s %s", direction.getText(true),
+ direction.getPreposition(), upgradeContext.getRepositoryVersion().getVersion());
String auditName = getExecutionCommand().getRoleParams().get(ServerAction.ACTION_USER_NAME);
@@ -536,12 +546,10 @@ public class ConfigureAction extends AbstractServerAction {
auditName = m_configuration.getAnonymousAuditName();
}
- m_configHelper.createConfigType(cluster, m_controller, configType,
+ m_configHelper.createConfigType(cluster, targetStackId, m_controller, configType,
newValues, auditName, serviceVersionNote);
- String message = "Finished updating configuration ''{0}''";
- message = MessageFormat.format(message, configType);
- return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", message, "");
+ return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", outputBuffer.toString(), "");
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 4d44e55..9f75bf9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -969,11 +969,10 @@ public class ConfigHelper {
* @param serviceVersionNote
* @throws AmbariException
*/
- public void updateConfigType(Cluster cluster,
- AmbariManagementController controller, String configType,
- Map<String, String> updates, Collection<String> removals,
- String authenticatedUserName,
- String serviceVersionNote) throws AmbariException {
+ public void updateConfigType(Cluster cluster, StackId stackId,
+ AmbariManagementController controller, String configType, Map<String, String> updates,
+ Collection<String> removals, String authenticatedUserName, String serviceVersionNote)
+ throws AmbariException {
// Nothing to update or remove
if (configType == null ||
@@ -1016,22 +1015,27 @@ public class ConfigHelper {
if ((oldConfigProperties == null)
|| !Maps.difference(oldConfigProperties, properties).areEqual()) {
- createConfigType(cluster, controller, configType, properties,
+ createConfigType(cluster, stackId, controller, configType, properties,
propertiesAttributes, authenticatedUserName, serviceVersionNote);
}
}
- private void createConfigType(Cluster cluster,
- AmbariManagementController controller,
- String configType, Map<String, String> properties,
- Map<String, Map<String, String>> propertyAttributes,
- String authenticatedUserName,
- String serviceVersionNote) throws AmbariException {
+ public void createConfigType(Cluster cluster, StackId stackId,
+ AmbariManagementController controller, String configType, Map<String, String> properties,
+ String authenticatedUserName, String serviceVersionNote) throws AmbariException {
+
+ createConfigType(cluster, stackId, controller, configType, properties,
+ new HashMap<String, Map<String, String>>(), authenticatedUserName, serviceVersionNote);
+ }
+
+ public void createConfigType(Cluster cluster, StackId stackId,
+ AmbariManagementController controller, String configType, Map<String, String> properties,
+ Map<String, Map<String, String>> propertyAttributes, String authenticatedUserName,
+ String serviceVersionNote) throws AmbariException {
// create the configuration history entry
- Config baseConfig = createConfig(cluster, controller, cluster.getDesiredStackVersion(),
- configType, FIRST_VERSION_TAG, properties,
- propertyAttributes);
+ Config baseConfig = createConfig(cluster, stackId, controller, configType, FIRST_VERSION_TAG,
+ properties, propertyAttributes);
if (baseConfig != null) {
cluster.addDesiredConfig(authenticatedUserName,
@@ -1040,34 +1044,6 @@ public class ConfigHelper {
}
/**
- * A helper method to create a new {@link Config} for a given configuration
- * type. This method will perform the following tasks:
- * <ul>
- * <li>Create a {@link Config} in the cluster for the specified type. This
- * will have the proper versions and tags set automatically.</li>
- * <li>Set the cluster's {@link DesiredConfig} to the new configuration</li>
- * <li>Create an entry in the configuration history with a note and username.</li>
- * <ul>
- *
- * @param cluster
- * @param controller
- * @param configType
- * @param properties
- * @param authenticatedUserName
- * @param serviceVersionNote
- * @throws AmbariException
- */
- public void createConfigType(Cluster cluster,
- AmbariManagementController controller,
- String configType, Map<String, String> properties,
- String authenticatedUserName,
- String serviceVersionNote) throws AmbariException {
- createConfigType(cluster, controller, configType, properties,
- new HashMap<String, Map<String, String>>(), authenticatedUserName,
- serviceVersionNote);
- }
-
- /**
* Create configurations and assign them for services.
* @param cluster the cluster
* @param controller the controller
@@ -1077,10 +1053,9 @@ public class ConfigHelper {
* @param serviceVersionNote the service version note
* @throws AmbariException
*/
- public void createConfigTypes(Cluster cluster,
- AmbariManagementController controller, StackId stackId,
- Map<String, Map<String, String>> batchProperties, String authenticatedUserName,
- String serviceVersionNote) throws AmbariException {
+ public void createConfigTypes(Cluster cluster, StackId stackId,
+ AmbariManagementController controller, Map<String, Map<String, String>> batchProperties,
+ String authenticatedUserName, String serviceVersionNote) throws AmbariException {
Map<String, Set<Config>> serviceMapped = new HashMap<>();
@@ -1088,7 +1063,7 @@ public class ConfigHelper {
String type = entry.getKey();
Map<String, String> properties = entry.getValue();
- Config baseConfig = createConfig(cluster, controller, stackId, type, FIRST_VERSION_TAG,
+ Config baseConfig = createConfig(cluster, stackId, controller, type, FIRST_VERSION_TAG,
properties, Collections.<String, Map<String, String>> emptyMap());
if (null != baseConfig) {
@@ -1121,6 +1096,8 @@ public class ConfigHelper {
*
* @param cluster
* the cluster (not {@code null}).
+ * @param stackId
+ * the stack to create the new properties for
* @param controller
* the controller which actually creates the configuration (not
* {@code null}).
@@ -1138,7 +1115,7 @@ public class ConfigHelper {
* @return
* @throws AmbariException
*/
- Config createConfig(Cluster cluster, AmbariManagementController controller, StackId stackId,
+ Config createConfig(Cluster cluster, StackId stackId, AmbariManagementController controller,
String type, String tag, Map<String, String> properties,
Map<String, Map<String, String>> propertyAttributes) throws AmbariException {
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
index 77fabf8..5fdcd66 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
@@ -1018,8 +1018,11 @@ public class UpgradeHelper {
LOG.info("The upgrade will create the following configurations for stack {}: {}",
targetStackId, StringUtils.join(configTypes, ','));
- configHelper.createConfigTypes(cluster, controller, targetStackId,
- newServiceDefaultConfigsByType, userName, "Configuration created for Upgrade");
+ String serviceVersionNote = String.format("%s %s %s", direction.getText(true),
+ direction.getPreposition(), upgradeContext.getRepositoryVersion().getVersion());
+
+ configHelper.createConfigTypes(cluster, targetStackId, controller,
+ newServiceDefaultConfigsByType, userName, serviceVersionNote);
}
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
index 05bbdc1..8e59602 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
@@ -171,6 +171,13 @@ public class ClusterGrouping extends Grouping {
continue;
}
+ // only schedule this stage if its service is part of the upgrade
+ if (StringUtils.isNotBlank(execution.service)) {
+ if (!upgradeContext.isServiceSupported(execution.service)) {
+ continue;
+ }
+ }
+
Task task = execution.task;
StageWrapper wrapper = null;
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
index b7a2e78..a9280a4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
@@ -597,7 +597,8 @@ public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
// -----------------------------------------
// Set the updated configuration
- configHelper.createConfigType(cluster, ambariManagementController, "cluster-env", properties,
+ configHelper.createConfigType(cluster, cluster.getDesiredStackVersion(),
+ ambariManagementController, "cluster-env", properties,
AUTHENTICATED_USER_NAME, "Upgrading to Ambari 2.0");
// Set configuration (end)
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
index 1e8b51b..f413c69 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
@@ -698,8 +698,9 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
if (installedServices.contains(SLIDER_SERVICE_NAME)) {
Config sliderClientConfig = cluster.getDesiredConfigByType(SLIDER_CLIENT_CONFIG);
if (sliderClientConfig == null) {
- configHelper.createConfigType(cluster, ambariManagementController, SLIDER_CLIENT_CONFIG,
- new HashMap<String, String>(), AUTHENTICATED_USER_NAME, "");
+ configHelper.createConfigType(cluster, cluster.getDesiredStackVersion(),
+ ambariManagementController, SLIDER_CLIENT_CONFIG, new HashMap<String, String>(),
+ AUTHENTICATED_USER_NAME, "");
}
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
index 7679211..97b94c3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
@@ -241,7 +241,7 @@ public class StackUpgradeConfigurationMergeTest extends EasyMockSupport {
Capture<Map<String, Map<String, String>>> expectedConfigurationsCapture = EasyMock.newCapture();
configHelper.createConfigTypes(EasyMock.anyObject(Cluster.class),
- EasyMock.anyObject(AmbariManagementController.class), EasyMock.anyObject(StackId.class),
+ EasyMock.anyObject(StackId.class), EasyMock.anyObject(AmbariManagementController.class),
EasyMock.capture(expectedConfigurationsCapture), EasyMock.anyObject(String.class),
EasyMock.anyObject(String.class));
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/test/java/org/apache/ambari/server/serveraction/kerberos/UpdateKerberosConfigsServerActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/kerberos/UpdateKerberosConfigsServerActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/kerberos/UpdateKerberosConfigsServerActionTest.java
index 722ab0c..07391b7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/kerberos/UpdateKerberosConfigsServerActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/kerberos/UpdateKerberosConfigsServerActionTest.java
@@ -35,6 +35,7 @@ import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.stack.OsFamily;
import org.easymock.Capture;
import org.easymock.CaptureType;
@@ -104,7 +105,7 @@ public class UpdateKerberosConfigsServerActionTest extends EasyMockSupport{
executionCommand.setCommandParams(commandParams);
ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
- configHelper.updateConfigType(anyObject(Cluster.class), anyObject(AmbariManagementController.class),
+ configHelper.updateConfigType(anyObject(Cluster.class), anyObject(StackId.class), anyObject(AmbariManagementController.class),
anyObject(String.class), EasyMock.<Map<String, String>>anyObject(), EasyMock.<Collection<String>>anyObject(), anyObject(String.class), anyObject(String.class));
expectLastCall().atLeastOnce();
@@ -157,7 +158,7 @@ public class UpdateKerberosConfigsServerActionTest extends EasyMockSupport{
Capture<String> configTypes = Capture.newInstance(CaptureType.ALL);
Capture<Map<String, String>> configUpdates = Capture.newInstance(CaptureType.ALL);
- configHelper.updateConfigType(anyObject(Cluster.class), anyObject(AmbariManagementController.class),
+ configHelper.updateConfigType(anyObject(Cluster.class), anyObject(StackId.class), anyObject(AmbariManagementController.class),
capture(configTypes), capture(configUpdates), anyObject(Collection.class), anyObject(String.class), anyObject(String.class));
expectLastCall().atLeastOnce();
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
index b12eb9b..478b126 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
@@ -32,6 +32,7 @@ import javax.persistence.EntityManager;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.H2DatabaseCleaner;
+import org.apache.ambari.server.ServiceComponentNotFoundException;
import org.apache.ambari.server.ServiceNotFoundException;
import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
import org.apache.ambari.server.actionmanager.HostRoleCommand;
@@ -41,13 +42,12 @@ import org.apache.ambari.server.agent.ExecutionCommand;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.OrmTestHelper;
-import org.apache.ambari.server.orm.dao.HostDAO;
-import org.apache.ambari.server.orm.dao.HostVersionDAO;
-import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.HostVersionEntity;
+import org.apache.ambari.server.orm.dao.RequestDAO;
+import org.apache.ambari.server.orm.dao.UpgradeDAO;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.orm.entities.RequestEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity;
import org.apache.ambari.server.serveraction.ServerAction;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
@@ -55,10 +55,14 @@ import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.RepositoryVersionState;
import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentFactory;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceComponentHostFactory;
import org.apache.ambari.server.state.ServiceFactory;
import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.State;
import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.ConfigurationKeyValue;
import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Insert;
import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.InsertType;
@@ -68,7 +72,10 @@ import org.apache.ambari.server.state.stack.upgrade.ConfigureTask;
import org.apache.ambari.server.state.stack.upgrade.PropertyKeyState;
import org.apache.ambari.server.state.stack.upgrade.TransferCoercionType;
import org.apache.ambari.server.state.stack.upgrade.TransferOperation;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+import org.apache.commons.lang3.StringUtils;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -82,41 +89,59 @@ import com.google.inject.Injector;
*/
public class ConfigureActionTest {
- private static final String HDP_2_2_0_0 = "2.2.0.0-2041";
- private static final String HDP_2_2_0_1 = "2.2.0.1-2270";
- private static final StackId HDP_211_STACK = new StackId("HDP-2.1.1");
- private static final StackId HDP_220_STACK = new StackId("HDP-2.2.0");
-
@Inject
private Injector m_injector;
+
@Inject
private OrmTestHelper m_helper;
- @Inject
- private RepositoryVersionDAO repoVersionDAO;
- @Inject
- private HostVersionDAO hostVersionDAO;
+
@Inject
private HostRoleCommandFactory hostRoleCommandFactory;
+
@Inject
private ServiceFactory serviceFactory;
+
@Inject
private ConfigHelper m_configHelper;
+
@Inject
private Clusters clusters;
+
@Inject
- private ConfigFactory cf;
+ private ConfigFactory configFactory;
+
@Inject
private ConfigureAction action;
+
@Inject
- private HostDAO hostDAO;
+ private RequestDAO requestDAO;
+
@Inject
- private StackDAO stackDAO;
+ private UpgradeDAO upgradeDAO;
+
+ @Inject
+ private ServiceComponentFactory serviceComponentFactory;
+
+ @Inject
+ private ServiceComponentHostFactory serviceComponentHostFactory;
+
+ private RepositoryVersionEntity repoVersion2110;
+ private RepositoryVersionEntity repoVersion2111;
+ private RepositoryVersionEntity repoVersion2200;
+
+ private final Map<String, Map<String, String>> NO_ATTRIBUTES = new HashMap<>();
@Before
public void setup() throws Exception {
m_injector = Guice.createInjector(new InMemoryDefaultTestModule());
m_injector.getInstance(GuiceJpaInitializer.class);
m_injector.injectMembers(this);
+
+ repoVersion2110 = m_helper.getOrCreateRepositoryVersion(new StackId("HDP-2.1.1"), "2.1.1.0-1234");
+ repoVersion2111 = m_helper.getOrCreateRepositoryVersion(new StackId("HDP-2.1.1"), "2.1.1.1-5678");
+ repoVersion2200 = m_helper.getOrCreateRepositoryVersion(new StackId("HDP-2.2.0"), "2.2.0.0-1234");
+
+ makeUpgradeCluster();
}
@After
@@ -125,18 +150,24 @@ public class ConfigureActionTest {
}
+ /**
+ * Tests that a new configuration is created when upgrading across stack when
+ * there is no existing configuration with the correct target stack.
+ *
+ * @throws Exception
+ */
@Test
- public void testConfigActionUpgradeAcrossStack() throws Exception {
- makeUpgradeCluster();
-
+ public void testNewConfigCreatedWhenUpgradingAcrossStacks() throws Exception {
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setCurrentStackVersion(HDP_211_STACK);
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
- put("initLimit", "10");
- }}, new HashMap<String, Map<String,String>>());
+ Map<String, String> properties = new HashMap<String, String>() {
+ {
+ put("initLimit", "10");
+ }
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
@@ -147,23 +178,73 @@ public class ConfigureActionTest {
keyValue.key = "initLimit";
keyValue.value = "11";
+ createUpgrade(c, repoVersion2200);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null,
null, null);
hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(
executionCommand));
+ action.setExecutionCommand(executionCommand);
+ action.setHostRoleCommand(hostRoleCommand);
+
+ CommandReport report = action.execute(null);
+ assertNotNull(report);
+
+ assertEquals(3, c.getConfigsByType("zoo.cfg").size());
+
+ config = c.getDesiredConfigByType("zoo.cfg");
+ assertNotNull(config);
+ assertFalse(StringUtils.equals("version2", config.getTag()));
+ assertEquals("11", config.getProperties().get("initLimit"));
+ }
+
+ /**
+ * Tests that if a configuration with the target stack already exists, then it
+ * will be re-used instead of a new one created.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testConfigurationWithTargetStackUsed() throws Exception {
+ Cluster c = clusters.getCluster("c1");
+ assertEquals(1, c.getConfigsByType("zoo.cfg").size());
+
+ Map<String, String> properties = new HashMap<String, String>() {
+ {
+ put("initLimit", "10");
+ }
+ };
+
+ Config config = createConfig(c, repoVersion2200, "zoo.cfg", "version2", properties);
+
+ c.addDesiredConfig("user", Collections.singleton(config));
+ assertEquals(2, c.getConfigsByType("zoo.cfg").size());
+
+ List<ConfigurationKeyValue> configurations = new ArrayList<>();
+ ConfigurationKeyValue keyValue = new ConfigurationKeyValue();
+ configurations.add(keyValue);
+ keyValue.key = "initLimit";
+ keyValue.value = "11";
+
+ createUpgrade(c, repoVersion2200);
+
+ Map<String, String> commandParams = new HashMap<>();
+ commandParams.put("clusterName", "c1");
+ commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
+ commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations));
+
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
+ HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
+
+ hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
action.setExecutionCommand(executionCommand);
action.setHostRoleCommand(hostRoleCommand);
@@ -186,31 +267,29 @@ public class ConfigureActionTest {
*/
@Test
public void testDeletePreserveChanges() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setDesiredStackVersion(HDP_220_STACK);
-
// create a config for zoo.cfg with two values; one is a stack value and the
// other is custom
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
+ Map<String, String> properties = new HashMap<String, String>() {
{
put("tickTime", "2000");
put("foo", "bar");
}
- }, new HashMap<String, Map<String, String>>());
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
+ createUpgrade(c, repoVersion2111);
+
// delete all keys, preserving edits or additions
List<Transfer> transfers = new ArrayList<>();
Transfer transfer = new Transfer();
@@ -221,16 +300,10 @@ public class ConfigureActionTest {
commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
- executionCommand.setRoleParams(new HashMap<String, String>());
- executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
-
action.setExecutionCommand(executionCommand);
action.setHostRoleCommand(hostRoleCommand);
@@ -252,18 +325,19 @@ public class ConfigureActionTest {
@Test
public void testConfigTransferCopy() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
- put("initLimit", "10");
- put("copyIt", "10");
- put("moveIt", "10");
- put("deleteIt", "10");
- }}, new HashMap<String, Map<String,String>>());
+ Map<String, String> properties = new HashMap<String, String>() {
+ {
+ put("initLimit", "10");
+ put("copyIt", "10");
+ put("moveIt", "10");
+ put("deleteIt", "10");
+ }
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
@@ -274,9 +348,9 @@ public class ConfigureActionTest {
keyValue.key = "initLimit";
keyValue.value = "11";
+ createUpgrade(c, repoVersion2111);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations));
@@ -320,12 +394,7 @@ public class ConfigureActionTest {
commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
- executionCommand.setRoleParams(new HashMap<String, String>());
- executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null,
null, null);
@@ -389,24 +458,23 @@ public class ConfigureActionTest {
@Test
public void testCoerceValueOnCopy() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
+ Map<String, String> properties = new HashMap<String, String>() {
{
put("zoo.server.csv", "c6401,c6402, c6403");
}
- }, new HashMap<String, Map<String, String>>());
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
+ createUpgrade(c, repoVersion2111);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
@@ -422,12 +490,7 @@ public class ConfigureActionTest {
commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
- executionCommand.setRoleParams(new HashMap<String, String>());
- executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
@@ -452,25 +515,24 @@ public class ConfigureActionTest {
@Test
public void testValueReplacement() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
+ Map<String, String> properties = new HashMap<String, String>() {
{
put("key_to_replace", "My New Cat");
put("key_with_no_match", "WxyAndZ");
}
- }, new HashMap<String, Map<String, String>>());
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
+ createUpgrade(c, repoVersion2111);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
@@ -490,12 +552,7 @@ public class ConfigureActionTest {
commandParams.put(ConfigureTask.PARAMETER_REPLACEMENTS, new Gson().toJson(replacements));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
- executionCommand.setRoleParams(new HashMap<String, String>());
- executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
@@ -524,25 +581,24 @@ public class ConfigureActionTest {
*/
@Test
public void testValueReplacementWithMissingConfigurations() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
+ Map<String, String> properties = new HashMap<String, String>() {
{
put("existing", "This exists!");
put("missing", null);
}
- }, new HashMap<String, Map<String, String>>());
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
+ createUpgrade(c, repoVersion2111);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
@@ -581,18 +637,16 @@ public class ConfigureActionTest {
@Test
public void testMultipleKeyValuesPerTask() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setCurrentStackVersion(HDP_211_STACK);
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
+ Map<String, String> properties = new HashMap<String, String>() {
{
put("fooKey", "barValue");
}
- }, new HashMap<String, Map<String, String>>());
+ };
+
+ Config config = createConfig(c, repoVersion2200, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
@@ -610,17 +664,14 @@ public class ConfigureActionTest {
fooKey3.value = "barValue3";
fooKey3.mask = true;
+ createUpgrade(c, repoVersion2200);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
@@ -644,21 +695,19 @@ public class ConfigureActionTest {
@Test
public void testAllowedSet() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setCurrentStackVersion(HDP_211_STACK);
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
+ Map<String, String> properties = new HashMap<String, String>() {
{
put("set.key.1", "s1");
put("set.key.2", "s2");
put("set.key.3", "s3");
put("set.key.4", "s4");
}
- }, new HashMap<String, Map<String, String>>());
+ };
+
+ Config config = createConfig(c, repoVersion2200, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
@@ -699,18 +748,14 @@ public class ConfigureActionTest {
fooKey5.ifType = "zoo.cfg";
fooKey5.ifKeyState= PropertyKeyState.ABSENT;
+ createUpgrade(c, repoVersion2200);
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
@@ -738,21 +783,19 @@ public class ConfigureActionTest {
@Test
public void testDisallowedSet() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setCurrentStackVersion(HDP_211_STACK);
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
+ Map<String, String> properties = new HashMap<String, String>() {
{
put("set.key.1", "s1");
put("set.key.2", "s2");
put("set.key.3", "s3");
put("set.key.4", "s4");
}
- }, new HashMap<String, Map<String, String>>());
+ };
+
+ Config config = createConfig(c, repoVersion2200, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
@@ -783,18 +826,14 @@ public class ConfigureActionTest {
fooKey5.ifType = "zoo.cfg";
fooKey5.ifKeyState= PropertyKeyState.PRESENT;
+ createUpgrade(c, repoVersion2200);
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
@@ -820,14 +859,12 @@ public class ConfigureActionTest {
@Test
public void testAllowedReplacment() throws Exception {
- makeUpgradeCluster();
+
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setCurrentStackVersion(HDP_211_STACK);
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
+ Map<String, String> properties = new HashMap<String, String>() {
{
put("replace.key.1", "r1");
put("replace.key.2", "r2");
@@ -835,7 +872,9 @@ public class ConfigureActionTest {
put("replace.key.4", "r4");
put("replace.key.5", "r5");
}
- }, new HashMap<String, Map<String, String>>());
+ };
+
+ Config config = createConfig(c, repoVersion2200, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
@@ -878,17 +917,14 @@ public class ConfigureActionTest {
replace4.ifKeyState = PropertyKeyState.ABSENT;
replacements.add(replace4);
+ createUpgrade(c, repoVersion2200);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
commandParams.put(ConfigureTask.PARAMETER_REPLACEMENTS, new Gson().toJson(replacements));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
@@ -912,14 +948,10 @@ public class ConfigureActionTest {
@Test
public void testDisallowedReplacment() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setCurrentStackVersion(HDP_211_STACK);
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
+ Map<String, String> properties = new HashMap<String, String>() {
{
put("replace.key.1", "r1");
put("replace.key.2", "r2");
@@ -927,7 +959,9 @@ public class ConfigureActionTest {
put("replace.key.4", "r4");
put("replace.key.5", "r5");
}
- }, new HashMap<String, Map<String, String>>());
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
@@ -965,17 +999,14 @@ public class ConfigureActionTest {
replace4.ifKeyState = PropertyKeyState.PRESENT;
replacements.add(replace4);
+ createUpgrade(c, repoVersion2200);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
commandParams.put(ConfigureTask.PARAMETER_REPLACEMENTS, new Gson().toJson(replacements));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
@@ -999,17 +1030,18 @@ public class ConfigureActionTest {
@Test
public void testAllowedTransferCopy() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
- put("initLimit", "10");
- put("copy.key.1", "c1");
- put("copy.key.2", "c2");
- }}, new HashMap<String, Map<String,String>>());
+ Map<String, String> properties = new HashMap<String, String>() {
+ {
+ put("initLimit", "10");
+ put("copy.key.1", "c1");
+ put("copy.key.2", "c2");
+ }
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
@@ -1020,9 +1052,9 @@ public class ConfigureActionTest {
keyValue.key = "initLimit";
keyValue.value = "11";
+ createUpgrade(c, repoVersion2200);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations));
@@ -1073,12 +1105,7 @@ public class ConfigureActionTest {
commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
- executionCommand.setRoleParams(new HashMap<String, String>());
- executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null,
null, null);
@@ -1112,17 +1139,18 @@ public class ConfigureActionTest {
@Test
public void testDisallowedTransferCopy() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
- put("initLimit", "10");
- put("copy.key.1", "c1");
- put("copy.key.2", "c2");
- }}, new HashMap<String, Map<String,String>>());
+ Map<String, String> properties = new HashMap<String, String>() {
+ {
+ put("initLimit", "10");
+ put("copy.key.1", "c1");
+ put("copy.key.2", "c2");
+ }
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
@@ -1133,9 +1161,9 @@ public class ConfigureActionTest {
keyValue.key = "initLimit";
keyValue.value = "11";
+ createUpgrade(c, repoVersion2111);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations));
@@ -1205,19 +1233,20 @@ public class ConfigureActionTest {
@Test
public void testAllowedTransferMove() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
- put("initLimit", "10");
- put("move.key.1", "m1");
- put("move.key.2", "m2");
- put("move.key.3", "m3");
- put("move.key.4", "m4");
- }}, new HashMap<String, Map<String,String>>());
+ Map<String, String> properties = new HashMap<String, String>() {
+ {
+ put("initLimit", "10");
+ put("move.key.1", "m1");
+ put("move.key.2", "m2");
+ put("move.key.3", "m3");
+ put("move.key.4", "m4");
+ }
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
@@ -1228,9 +1257,9 @@ public class ConfigureActionTest {
keyValue.key = "initLimit";
keyValue.value = "11";
+ createUpgrade(c, repoVersion2111);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations));
@@ -1271,12 +1300,7 @@ public class ConfigureActionTest {
commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
- executionCommand.setRoleParams(new HashMap<String, String>());
- executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null,
null, null);
@@ -1311,20 +1335,20 @@ public class ConfigureActionTest {
@Test
public void testDisallowedTransferMove() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2",
- new HashMap<String, String>() {{
- put("initLimit", "10");
- put("move.key.1", "m1");
- put("move.key.2", "m2");
- put("move.key.3", "m3");
- put("move.key.4", "m4");
- }}, new HashMap<String, Map<String,String>>());
+ Map<String, String> properties = new HashMap<String, String>() {
+ {
+ put("initLimit", "10");
+ put("move.key.1", "m1");
+ put("move.key.2", "m2");
+ put("move.key.3", "m3");
+ put("move.key.4", "m4");
+ }
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
@@ -1335,9 +1359,9 @@ public class ConfigureActionTest {
keyValue.key = "initLimit";
keyValue.value = "11";
+ createUpgrade(c, repoVersion2111);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations));
@@ -1372,12 +1396,7 @@ public class ConfigureActionTest {
commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
- executionCommand.setRoleParams(new HashMap<String, String>());
- executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null,
null, null);
@@ -1413,19 +1432,20 @@ public class ConfigureActionTest {
@Test
public void testAllowedTransferDelete() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
- put("initLimit", "10");
- put("delete.key.1", "d1");
- put("delete.key.2", "d2");
- put("delete.key.3", "d3");
- put("delete.key.4", "d4");
- }}, new HashMap<String, Map<String,String>>());
+ Map<String, String> properties = new HashMap<String, String>() {
+ {
+ put("initLimit", "10");
+ put("delete.key.1", "d1");
+ put("delete.key.2", "d2");
+ put("delete.key.3", "d3");
+ put("delete.key.4", "d4");
+ }
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
@@ -1436,9 +1456,9 @@ public class ConfigureActionTest {
keyValue.key = "initLimit";
keyValue.value = "11";
+ createUpgrade(c, repoVersion2111);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations));
@@ -1475,12 +1495,7 @@ public class ConfigureActionTest {
commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
- executionCommand.setRoleParams(new HashMap<String, String>());
- executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null,
null, null);
@@ -1511,19 +1526,20 @@ public class ConfigureActionTest {
@Test
public void testDisallowedTransferDelete() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
- put("initLimit", "10");
- put("delete.key.1", "d1");
- put("delete.key.2", "d2");
- put("delete.key.3", "d3");
- put("delete.key.4", "d4");
- }}, new HashMap<String, Map<String,String>>());
+ Map<String, String> properties = new HashMap<String, String>() {
+ {
+ put("initLimit", "10");
+ put("delete.key.1", "d1");
+ put("delete.key.2", "d2");
+ put("delete.key.3", "d3");
+ put("delete.key.4", "d4");
+ }
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
@@ -1534,9 +1550,9 @@ public class ConfigureActionTest {
keyValue.key = "initLimit";
keyValue.value = "11";
+ createUpgrade(c, repoVersion2111);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
commandParams.put(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS, new Gson().toJson(configurations));
@@ -1568,12 +1584,7 @@ public class ConfigureActionTest {
commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
- executionCommand.setRoleParams(new HashMap<String, String>());
- executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null,
null, null);
@@ -1609,25 +1620,24 @@ public class ConfigureActionTest {
*/
@Test
public void testInsert() throws Exception {
- makeUpgradeCluster();
-
Cluster c = clusters.getCluster("c1");
assertEquals(1, c.getConfigsByType("zoo.cfg").size());
- c.setDesiredStackVersion(HDP_220_STACK);
- Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
+ Map<String, String> properties = new HashMap<String, String>() {
{
put("key_to_append", "append");
put("key_to_prepend", "prepend");
}
- }, new HashMap<String, Map<String, String>>());
+ };
+
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version2", properties);
c.addDesiredConfig("user", Collections.singleton(config));
assertEquals(2, c.getConfigsByType("zoo.cfg").size());
+ createUpgrade(c, repoVersion2111);
+
Map<String, String> commandParams = new HashMap<>();
- commandParams.put("upgrade_direction", "upgrade");
- commandParams.put("version", HDP_2_2_0_1);
commandParams.put("clusterName", "c1");
commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
@@ -1662,12 +1672,7 @@ public class ConfigureActionTest {
commandParams.put(ConfigureTask.PARAMETER_INSERTIONS, new Gson().toJson(insertions));
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName("c1");
- executionCommand.setRoleParams(new HashMap<String, String>());
- executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
-
+ ExecutionCommand executionCommand = getExecutionCommand(commandParams);
HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
action.setExecutionCommand(executionCommand);
@@ -1690,58 +1695,46 @@ public class ConfigureActionTest {
assertEquals(expectedAppend, config.getProperties().get("key_to_append"));
}
+ /**
+ * Creates a cluster using {@link #repoVersion2110} with ZooKeeper installed.
+ *
+ * @throws Exception
+ */
private void makeUpgradeCluster() throws Exception {
String clusterName = "c1";
String hostName = "h1";
- clusters.addCluster(clusterName, HDP_220_STACK);
-
- StackEntity stackEntity = stackDAO.find(HDP_220_STACK.getStackName(),
- HDP_220_STACK.getStackVersion());
-
- assertNotNull(stackEntity);
+ clusters.addCluster(clusterName, repoVersion2110.getStackId());
Cluster c = clusters.getCluster(clusterName);
- c.setDesiredStackVersion(HDP_220_STACK);
-
- // Creating starting repo
- RepositoryVersionEntity repositoryVersionEntity = m_helper.getOrCreateRepositoryVersion(
- HDP_220_STACK, HDP_2_2_0_0);
-
- // !!! very important, otherwise the loops that walk the list of installed
- // service properties will not run!
- installService(c, "ZOOKEEPER", repositoryVersionEntity);
-
- Config config = cf.createNew(c, "zoo.cfg", "version1", new HashMap<String, String>() {
- {
- put("initLimit", "10");
- }
- }, new HashMap<String, Map<String, String>>());
-
- c.addDesiredConfig("user", Collections.singleton(config));
// add a host component
clusters.addHost(hostName);
-
Host host = clusters.getHost(hostName);
-
Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
host.setHostAttributes(hostAttributes);
- String urlInfo = "[{'repositories':["
- + "{'Repositories/base_url':'http://foo1','Repositories/repo_name':'HDP','Repositories/repo_id':'HDP-2.2.0'}"
- + "], 'OperatingSystems/os_type':'redhat6'}]";
- repoVersionDAO.create(stackEntity, HDP_2_2_0_1, String.valueOf(System.currentTimeMillis()), urlInfo);
+ clusters.mapHostToCluster(hostName, clusterName);
+
+ // !!! very important, otherwise the loops that walk the list of installed
+ // service properties will not run!
+ Service zk = installService(c, "ZOOKEEPER", repoVersion2110);
+ addServiceComponent(c, zk, "ZOOKEEPER_SERVER");
+ addServiceComponent(c, zk, "ZOOKEEPER_CLIENT");
+ createNewServiceComponentHost(c, "ZOOKEEPER", "ZOOKEEPER_SERVER", hostName);
+ createNewServiceComponentHost(c, "ZOOKEEPER", "ZOOKEEPER_CLIENT", hostName);
+
+ Map<String, String> properties = new HashMap<String, String>() {
+ {
+ put("initLimit", "10");
+ }
+ };
- c.setCurrentStackVersion(HDP_220_STACK);
+ Config config = createConfig(c, repoVersion2110, "zoo.cfg", "version1", properties);
- HostVersionEntity entity = new HostVersionEntity();
- entity.setHostEntity(hostDAO.findByName(hostName));
- entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(HDP_220_STACK, HDP_2_2_0_1));
- entity.setState(RepositoryVersionState.INSTALLED);
- hostVersionDAO.create(entity);
+ c.addDesiredConfig("user", Collections.singleton(config));
// verify that our configs are there
String tickTime = m_configHelper.getPropertyValueFromStackDefinitions(c, "zoo.cfg", "tickTime");
@@ -1769,4 +1762,96 @@ public class ConfigureActionTest {
return service;
}
+
+ private ServiceComponent addServiceComponent(Cluster cluster, Service service,
+ String componentName) throws AmbariException {
+ ServiceComponent serviceComponent = null;
+ try {
+ serviceComponent = service.getServiceComponent(componentName);
+ } catch (ServiceComponentNotFoundException e) {
+ serviceComponent = serviceComponentFactory.createNew(service, componentName);
+ service.addServiceComponent(serviceComponent);
+ serviceComponent.setDesiredState(State.INSTALLED);
+ }
+
+ return serviceComponent;
+ }
+
+ private ServiceComponentHost createNewServiceComponentHost(Cluster cluster, String serviceName,
+ String svcComponent, String hostName) throws AmbariException {
+ Assert.assertNotNull(cluster.getConfigGroups());
+ Service s = cluster.getService(serviceName);
+ ServiceComponent sc = addServiceComponent(cluster, s, svcComponent);
+
+ ServiceComponentHost sch = serviceComponentHostFactory.createNew(sc, hostName);
+
+ sc.addServiceComponentHost(sch);
+ sch.setDesiredState(State.INSTALLED);
+ sch.setState(State.INSTALLED);
+ return sch;
+ }
+
+ /**
+ * Creates an upgrade and associates it with the cluster.
+ *
+ * @param cluster
+ *          the cluster to associate the upgrade with
+ * @param repositoryVersion
+ * @throws Exception
+ */
+ private UpgradeEntity createUpgrade(Cluster cluster, RepositoryVersionEntity repositoryVersion)
+ throws Exception {
+
+ // create some entities for the finalize action to work with for patch
+ // history
+ RequestEntity requestEntity = new RequestEntity();
+ requestEntity.setClusterId(cluster.getClusterId());
+ requestEntity.setRequestId(1L);
+ requestEntity.setStartTime(System.currentTimeMillis());
+ requestEntity.setCreateTime(System.currentTimeMillis());
+ requestDAO.create(requestEntity);
+
+ UpgradeEntity upgradeEntity = new UpgradeEntity();
+ upgradeEntity.setId(1L);
+ upgradeEntity.setClusterId(cluster.getClusterId());
+ upgradeEntity.setRequestEntity(requestEntity);
+ upgradeEntity.setUpgradePackage("");
+ upgradeEntity.setRepositoryVersion(repositoryVersion);
+ upgradeEntity.setUpgradeType(UpgradeType.NON_ROLLING);
+
+ Map<String, Service> services = cluster.getServices();
+ for (String serviceName : services.keySet()) {
+ Service service = services.get(serviceName);
+ Map<String, ServiceComponent> components = service.getServiceComponents();
+ for (String componentName : components.keySet()) {
+ UpgradeHistoryEntity history = new UpgradeHistoryEntity();
+ history.setUpgrade(upgradeEntity);
+ history.setServiceName(serviceName);
+ history.setComponentName(componentName);
+ history.setFromRepositoryVersion(service.getDesiredRepositoryVersion());
+ history.setTargetRepositoryVersion(repositoryVersion);
+ upgradeEntity.addHistory(history);
+ }
+ }
+
+ upgradeDAO.create(upgradeEntity);
+ cluster.setUpgradeEntity(upgradeEntity);
+ return upgradeEntity;
+ }
+
+ private ExecutionCommand getExecutionCommand(Map<String, String> commandParams) {
+ ExecutionCommand executionCommand = new ExecutionCommand();
+ executionCommand.setClusterName("c1");
+ executionCommand.setCommandParams(commandParams);
+ executionCommand.setRoleParams(new HashMap<String, String>());
+ executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
+
+ return executionCommand;
+ }
+
+ private Config createConfig(Cluster cluster, RepositoryVersionEntity repoVersion, String type,
+ String tag, Map<String, String> properties) {
+ return configFactory.createNew(repoVersion.getStackId(), cluster, type, tag, properties,
+ NO_ATTRIBUTES);
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index 625b2ea..35fffda 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -564,7 +564,7 @@ public class UpgradeActionTest {
}
/**
- * Creates an upgrade an associates it with the cluster.
+ * Creates an upgrade and associates it with the cluster.
*
* @param cluster
* @param sourceRepo
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index e1eca14..857da61 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@ -815,7 +815,8 @@ public class ConfigHelperTest {
updates.put("new-property", "new-value");
updates.put("fs.trash.interval", "updated-value");
Collection<String> removals = Collections.singletonList("ipc.client.connect.max.retries");
- configHelper.updateConfigType(cluster, managementController, "core-site", updates, removals, "admin", "Test note");
+ configHelper.updateConfigType(cluster, cluster.getCurrentStackVersion(), managementController,
+ "core-site", updates, removals, "admin", "Test note");
Config updatedConfig = cluster.getDesiredConfigByType("core-site");
@@ -853,8 +854,8 @@ public class ConfigHelperTest {
updates.put("oozie.authentication.type", "kerberos");
updates.put("oozie.service.HadoopAccessorService.kerberos.enabled", "true");
- configHelper.updateConfigType(cluster, managementController, "oozie-site", updates, null, "admin", "Test " +
- "note");
+ configHelper.updateConfigType(cluster, cluster.getCurrentStackVersion(), managementController,
+ "oozie-site", updates, null, "admin", "Test " + "note");
Config updatedConfig = cluster.getDesiredConfigByType("oozie-site");
// Config tag updated
@@ -881,7 +882,8 @@ public class ConfigHelperTest {
List<String> removals = new ArrayList<>();
removals.add("timeline.service.operating.mode");
- configHelper.updateConfigType(cluster, managementController, "ams-site", null, removals, "admin", "Test note");
+ configHelper.updateConfigType(cluster, cluster.getCurrentStackVersion(), managementController,
+ "ams-site", null, removals, "admin", "Test note");
Config updatedConfig = cluster.getDesiredConfigByType("ams-site");
// Config tag updated
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
index 277ef8b..921322b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
@@ -333,7 +333,7 @@ public class UpgradeHelperTest extends EasyMockSupport {
assertEquals("Save Cluster State", postGroup.items.get(1).getText());
assertEquals(StageWrapper.Type.SERVER_SIDE_ACTION, postGroup.items.get(1).getType());
- assertEquals(3, groups.get(0).items.size());
+ assertEquals(2, groups.get(0).items.size());
assertEquals(7, groups.get(1).items.size());
assertEquals(2, groups.get(2).items.size());
@@ -389,7 +389,7 @@ public class UpgradeHelperTest extends EasyMockSupport {
assertEquals("Save Cluster State", postGroup.items.get(1).getText());
assertEquals(StageWrapper.Type.SERVER_SIDE_ACTION, postGroup.items.get(1).getType());
- assertEquals(3, groups.get(0).items.size());
+ assertEquals(2, groups.get(0).items.size());
assertEquals(6, groups.get(1).items.size());
assertEquals(1, groups.get(2).items.size());
@@ -2361,7 +2361,7 @@ public class UpgradeHelperTest extends EasyMockSupport {
Capture<Map<String, Map<String, String>>> expectedConfigurationsCapture = EasyMock.newCapture();
configHelper.createConfigTypes(EasyMock.anyObject(Cluster.class),
- EasyMock.anyObject(AmbariManagementController.class), EasyMock.anyObject(StackId.class),
+ EasyMock.anyObject(StackId.class), EasyMock.anyObject(AmbariManagementController.class),
EasyMock.capture(expectedConfigurationsCapture), EasyMock.anyObject(String.class),
EasyMock.anyObject(String.class));
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
index e993f96..e82097b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
@@ -516,6 +516,7 @@ public class UpgradeCatalog200Test {
final Cluster mockClusterExpected = easyMockSupport.createStrictMock(Cluster.class);
final Cluster mockClusterMissingSmokeUser = easyMockSupport.createStrictMock(Cluster.class);
final Cluster mockClusterMissingConfig = easyMockSupport.createStrictMock(Cluster.class);
+ final StackId mockStackId = easyMockSupport.createNiceMock(StackId.class);
final Config mockClusterEnvExpected = easyMockSupport.createStrictMock(Config.class);
final Config mockClusterEnvMissingSmokeUser = easyMockSupport.createStrictMock(Config.class);
@@ -562,14 +563,16 @@ public class UpgradeCatalog200Test {
// Expected operation
expect(mockClusterExpected.getDesiredConfigByType("cluster-env")).andReturn(mockClusterEnvExpected).once();
+ expect(mockClusterExpected.getDesiredStackVersion()).andReturn(mockStackId).atLeastOnce();
expect(mockClusterEnvExpected.getProperties()).andReturn(propertiesExpectedT0).once();
- mockConfigHelper.createConfigType(mockClusterExpected, mockAmbariManagementController,
+ mockConfigHelper.createConfigType(mockClusterExpected, mockStackId, mockAmbariManagementController,
"cluster-env", propertiesExpectedT1, UpgradeCatalog200.AUTHENTICATED_USER_NAME, "Upgrading to Ambari 2.0");
expectLastCall().once();
// Missing smokeuser
expect(mockClusterMissingSmokeUser.getDesiredConfigByType("cluster-env")).andReturn(mockClusterEnvMissingSmokeUser).once();
+ expect(mockClusterMissingSmokeUser.getDesiredStackVersion()).andReturn(mockStackId).atLeastOnce();
expect(mockClusterEnvMissingSmokeUser.getProperties()).andReturn(propertiesMissingSmokeUserT0).once();
expect(mockConfigHelper.getStackProperties(mockClusterMissingSmokeUser)).andReturn(Collections.singleton(mockSmokeUserPropertyInfo)).once();
@@ -577,7 +580,7 @@ public class UpgradeCatalog200Test {
expect(mockSmokeUserPropertyInfo.getFilename()).andReturn("cluster-env.xml").once();
expect(mockSmokeUserPropertyInfo.getValue()).andReturn("ambari-qa").once();
- mockConfigHelper.createConfigType(mockClusterMissingSmokeUser, mockAmbariManagementController,
+ mockConfigHelper.createConfigType(mockClusterMissingSmokeUser, mockStackId, mockAmbariManagementController,
"cluster-env", propertiesMissingSmokeUserT1, UpgradeCatalog200.AUTHENTICATED_USER_NAME, "Upgrading to Ambari 2.0");
expectLastCall().once();
http://git-wip-us.apache.org/repos/asf/ambari/blob/2892aee5/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
index 46ce2d5..f106658 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
@@ -2488,6 +2488,7 @@ public class UpgradeCatalog240Test {
final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
final ConfigHelper configHelper = easyMockSupport.createNiceMock(ConfigHelper.class);
final Service serviceSlider = easyMockSupport.createNiceMock(Service.class);
+ StackId mockStackId = easyMockSupport.createNiceMock(StackId.class);
Map<String, Service> servicesMap = new HashMap<>();
servicesMap.put("SLIDER", serviceSlider);
@@ -2513,10 +2514,13 @@ public class UpgradeCatalog240Test {
expect(mockAmbariManagementController.getConfigHelper()).andReturn(configHelper).once();
expect(mockClusterExpected.getServices()).andReturn(servicesMap).once();
expect(mockClusterExpected.getDesiredConfigByType("slider-client")).andReturn(null).once();
+ expect(mockClusterExpected.getDesiredStackVersion()).andReturn(mockStackId).atLeastOnce();
- configHelper.createConfigType(mockClusterExpected, mockAmbariManagementController, "slider-client",
+ configHelper.createConfigType(mockClusterExpected, mockStackId,
+ mockAmbariManagementController, "slider-client",
new HashMap<String, String>(), "ambari-upgrade", "");
+
expectLastCall().once();
easyMockSupport.replayAll();