You are viewing a plain text version of this content. The canonical version is available at the commit link given below (http://git-wip-us.apache.org/repos/asf/ambari/commit/037d3926).
Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/19 20:31:47 UTC
ambari git commit: AMBARI-21522 - Installation Commands On New
Clusters Don't Send Down Correct Versions (jonathanhurley)
Repository: ambari
Updated Branches:
refs/heads/branch-2.5 f169fa9f8 -> 037d39260
AMBARI-21522 - Installation Commands On New Clusters Don't Send Down Correct Versions (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/037d3926
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/037d3926
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/037d3926
Branch: refs/heads/branch-2.5
Commit: 037d3926081cf79383466933f4495fdfe9bde6d5
Parents: f169fa9
Author: Dmytro Sen <ds...@apache.org>
Authored: Wed Jul 19 18:38:00 2017 +0300
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 19 16:31:29 2017 -0400
----------------------------------------------------------------------
.../actionmanager/ExecutionCommandWrapper.java | 29 ++++-
.../0.1.0.2.3/package/scripts/atlas_client.py | 2 +-
.../ATLAS/0.1.0.2.3/package/scripts/params.py | 2 +
.../ExecutionCommandWrapperTest.java | 112 ++++++++++++++++---
4 files changed, 120 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/037d3926/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
index 8875314..41521f8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
@@ -20,6 +20,7 @@ package org.apache.ambari.server.actionmanager;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
+import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
@@ -27,6 +28,7 @@ import java.util.TreeMap;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.ClusterNotFoundException;
+import org.apache.ambari.server.RoleCommand;
import org.apache.ambari.server.agent.AgentCommand.AgentCommandType;
import org.apache.ambari.server.agent.ExecutionCommand;
import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
@@ -199,15 +201,30 @@ public class ExecutionCommandWrapper {
Map<String,String> commandParams = executionCommand.getCommandParams();
// set the version for the command if it's not already set
- ClusterVersionEntity effectiveClusterVersion = cluster.getEffectiveClusterVersion();
- if (null != effectiveClusterVersion && !commandParams.containsKey(KeyNames.VERSION)) {
- commandParams.put(KeyNames.VERSION,
- effectiveClusterVersion.getRepositoryVersion().getVersion());
+ if (!commandParams.containsKey(KeyNames.VERSION)) {
+ // the cluster's effective version should be used for this command
+ ClusterVersionEntity effectiveClusterVersion = cluster.getEffectiveClusterVersion();
+
+ // in the event that the effective version is NULL (meaning that most
+ // likely the cluster is still being provisioned), then send down the
+ // version if this is not an install command
+ if (null == effectiveClusterVersion
+ && executionCommand.getRoleCommand() != RoleCommand.INSTALL) {
+ Collection<ClusterVersionEntity> clusterVersions = cluster.getAllClusterVersions();
+ if (clusterVersions.size() == 1) {
+ effectiveClusterVersion = clusterVersions.iterator().next();
+ }
+ }
+
+ if (null != effectiveClusterVersion) {
+ commandParams.put(KeyNames.VERSION,
+ effectiveClusterVersion.getRepositoryVersion().getVersion());
+ }
}
// add the stack and common-services folders to the command, but only if
- // they don't exist - they may have been put on here with specific values
- // ahead of time
+ // they don't exist - they may have been put on here with specific
+ // values ahead of time
StackId stackId = cluster.getDesiredStackVersion();
StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(),
stackId.getStackVersion());
http://git-wip-us.apache.org/repos/asf/ambari/blob/037d3926/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
index 2376ff9..d01ff84 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
@@ -37,7 +37,7 @@ class AtlasClient(Script):
import params
env.set_params(params)
- if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, params.version):
+ if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, params.version_for_stack_feature_checks):
conf_select.select(params.stack_name, "atlas", params.version)
stack_select.select("atlas-client", params.version)
http://git-wip-us.apache.org/repos/asf/ambari/blob/037d3926/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
index bdc9ec4..b83a79f 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
@@ -28,6 +28,7 @@ from resource_management import format_stack_version, Script
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.expect import expect
@@ -98,6 +99,7 @@ if security_enabled:
# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)
+version_for_stack_feature_checks = get_stack_feature_version(config)
# stack version
stack_version_unformatted = config['hostLevelParams']['stack_version']
http://git-wip-us.apache.org/repos/asf/ambari/blob/037d3926/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
index c28ba5d..25c3381 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
@@ -34,12 +34,16 @@ import org.apache.ambari.server.Role;
import org.apache.ambari.server.RoleCommand;
import org.apache.ambari.server.agent.AgentCommand.AgentCommandType;
import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.RepositoryVersionState;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
import org.apache.ambari.server.utils.StageUtils;
@@ -94,6 +98,7 @@ public class ExecutionCommandWrapperTest {
private static ConfigFactory configFactory;
private static ConfigHelper configHelper;
private static StageFactory stageFactory;
+ private static OrmTestHelper ormTestHelper;
@BeforeClass
public static void setup() throws AmbariException {
@@ -102,6 +107,7 @@ public class ExecutionCommandWrapperTest {
configHelper = injector.getInstance(ConfigHelper.class);
configFactory = injector.getInstance(ConfigFactory.class);
stageFactory = injector.getInstance(StageFactory.class);
+ ormTestHelper = injector.getInstance(OrmTestHelper.class);
clusters = injector.getInstance(Clusters.class);
clusters.addHost(HOST1);
@@ -109,37 +115,41 @@ public class ExecutionCommandWrapperTest {
Cluster cluster1 = clusters.getCluster(CLUSTER1);
- SERVICE_SITE_CLUSTER = new HashMap<String, String>();
+ SERVICE_SITE_CLUSTER = new HashMap<>();
SERVICE_SITE_CLUSTER.put(SERVICE_SITE_NAME1, SERVICE_SITE_VAL1);
SERVICE_SITE_CLUSTER.put(SERVICE_SITE_NAME2, SERVICE_SITE_VAL2);
SERVICE_SITE_CLUSTER.put(SERVICE_SITE_NAME3, SERVICE_SITE_VAL3);
SERVICE_SITE_CLUSTER.put(SERVICE_SITE_NAME4, SERVICE_SITE_VAL4);
- SERVICE_SITE_SERVICE = new HashMap<String, String>();
+ SERVICE_SITE_SERVICE = new HashMap<>();
SERVICE_SITE_SERVICE.put(SERVICE_SITE_NAME1, SERVICE_SITE_VAL1_S);
SERVICE_SITE_SERVICE.put(SERVICE_SITE_NAME5, SERVICE_SITE_VAL5_S);
- SERVICE_SITE_HOST = new HashMap<String, String>();
+ SERVICE_SITE_HOST = new HashMap<>();
SERVICE_SITE_HOST.put(SERVICE_SITE_NAME2, SERVICE_SITE_VAL2_H);
SERVICE_SITE_HOST.put(SERVICE_SITE_NAME6, SERVICE_SITE_VAL6_H);
- GLOBAL_CLUSTER = new HashMap<String, String>();
+ GLOBAL_CLUSTER = new HashMap<>();
GLOBAL_CLUSTER.put(GLOBAL_NAME1, GLOBAL_CLUSTER_VAL1);
GLOBAL_CLUSTER.put(GLOBAL_NAME2, GLOBAL_CLUSTER_VAL2);
- CONFIG_ATTRIBUTES = new HashMap<String, Map<String,String>>();
+ CONFIG_ATTRIBUTES = new HashMap<>();
//Cluster level global config
- configFactory.createNew(cluster1, GLOBAL_CONFIG, CLUSTER_VERSION_TAG, GLOBAL_CLUSTER, CONFIG_ATTRIBUTES);
+ configFactory.createNew(cluster1, GLOBAL_CONFIG, CLUSTER_VERSION_TAG, GLOBAL_CLUSTER,
+ CONFIG_ATTRIBUTES);
//Cluster level service config
- configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, CLUSTER_VERSION_TAG, SERVICE_SITE_CLUSTER, CONFIG_ATTRIBUTES);
+ configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, CLUSTER_VERSION_TAG,
+ SERVICE_SITE_CLUSTER, CONFIG_ATTRIBUTES);
//Service level service config
- configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_VERSION_TAG, SERVICE_SITE_SERVICE, CONFIG_ATTRIBUTES);
+ configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_VERSION_TAG,
+ SERVICE_SITE_SERVICE, CONFIG_ATTRIBUTES);
//Host level service config
- configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, HOST_VERSION_TAG, SERVICE_SITE_HOST, CONFIG_ATTRIBUTES);
+ configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, HOST_VERSION_TAG, SERVICE_SITE_HOST,
+ CONFIG_ATTRIBUTES);
ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
@@ -155,7 +165,7 @@ public class ExecutionCommandWrapperTest {
RoleCommand.START,
new ServiceComponentHostStartEvent(Role.NAMENODE.toString(),
hostName, System.currentTimeMillis()), clusterName, "HDFS", false, false);
- List<Stage> stages = new ArrayList<Stage>();
+ List<Stage> stages = new ArrayList<>();
stages.add(s);
Request request = new Request(stages, "clusterHostInfo", clusters);
db.persistActions(request);
@@ -165,13 +175,13 @@ public class ExecutionCommandWrapperTest {
public void testGetExecutionCommand() throws JSONException, AmbariException {
- Map<String, Map<String, String>> confs = new HashMap<String, Map<String, String>>();
- Map<String, String> configurationsGlobal = new HashMap<String, String>();
+ Map<String, Map<String, String>> confs = new HashMap<>();
+ Map<String, String> configurationsGlobal = new HashMap<>();
configurationsGlobal.put(GLOBAL_NAME1, GLOBAL_VAL1);
confs.put(GLOBAL_CONFIG, configurationsGlobal);
- Map<String, Map<String, String>> confTags = new HashMap<String, Map<String, String>>();
- Map<String, String> confTagServiceSite = new HashMap<String, String>();
+ Map<String, Map<String, String>> confTags = new HashMap<>();
+ Map<String, String> confTagServiceSite = new HashMap<>();
confTagServiceSite.put("tag", CLUSTER_VERSION_TAG);
confTagServiceSite.put("service_override_tag", SERVICE_VERSION_TAG);
@@ -223,7 +233,7 @@ public class ExecutionCommandWrapperTest {
//Union of all keys of service site configs
- Set<String> serviceSiteKeys = new HashSet<String>();
+ Set<String> serviceSiteKeys = new HashSet<>();
serviceSiteKeys.addAll(SERVICE_SITE_CLUSTER.keySet());
serviceSiteKeys.addAll(SERVICE_SITE_SERVICE.keySet());
serviceSiteKeys.addAll(SERVICE_SITE_HOST.keySet());
@@ -234,7 +244,7 @@ public class ExecutionCommandWrapperTest {
@Test
public void testGetMergedConfig() {
- Map<String, String> baseConfig = new HashMap<String, String>();
+ Map<String, String> baseConfig = new HashMap<>();
baseConfig.put(SERVICE_SITE_NAME1, SERVICE_SITE_VAL1);
baseConfig.put(SERVICE_SITE_NAME2, SERVICE_SITE_VAL2);
@@ -242,7 +252,7 @@ public class ExecutionCommandWrapperTest {
baseConfig.put(SERVICE_SITE_NAME4, SERVICE_SITE_VAL4);
baseConfig.put(SERVICE_SITE_NAME5, SERVICE_SITE_VAL5);
- Map<String, String> overrideConfig = new HashMap<String, String>();
+ Map<String, String> overrideConfig = new HashMap<>();
overrideConfig.put(SERVICE_SITE_NAME2, SERVICE_SITE_VAL2_H);
overrideConfig.put(SERVICE_SITE_NAME6, SERVICE_SITE_VAL6_H);
@@ -252,7 +262,7 @@ public class ExecutionCommandWrapperTest {
overrideConfig);
- Set<String> configsKeys = new HashSet<String>();
+ Set<String> configsKeys = new HashSet<>();
configsKeys.addAll(baseConfig.keySet());
configsKeys.addAll(overrideConfig.keySet());
@@ -266,6 +276,72 @@ public class ExecutionCommandWrapperTest {
Assert.assertEquals(SERVICE_SITE_VAL6_H, mergedConfig.get(SERVICE_SITE_NAME6));
}
+ /**
+ * Test that the execution command wrapper properly sets the version
+ * information when the cluster is in the INSTALLING state.
+ *
+ * @throws JSONException
+ * @throws AmbariException
+ */
+ @Test
+ public void testExecutionCommandHasVersionInfoWithoutCurrentClusterVersion()
+ throws JSONException, AmbariException {
+ Cluster cluster = clusters.getCluster(CLUSTER1);
+
+ StackId stackId = cluster.getDesiredStackVersion();
+ RepositoryVersionEntity repositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(stackId, "0.1-0000");
+
+ cluster.createClusterVersion(stackId, repositoryVersion.getVersion(), "admin",
+ RepositoryVersionState.INSTALLING);
+
+ // first try with an INSTALL command - this should not populate version info
+ ExecutionCommand executionCommand = new ExecutionCommand();
+ Map<String, String> commandParams = new HashMap<>();
+
+ executionCommand.setClusterName(CLUSTER1);
+ executionCommand.setTaskId(1);
+ executionCommand.setRequestAndStage(1, 1);
+ executionCommand.setHostname(HOST1);
+ executionCommand.setRole("NAMENODE");
+ executionCommand.setRoleParams(Collections.<String, String>emptyMap());
+ executionCommand.setRoleCommand(RoleCommand.INSTALL);
+ executionCommand.setServiceName("HDFS");
+ executionCommand.setCommandType(AgentCommandType.EXECUTION_COMMAND);
+ executionCommand.setCommandParams(commandParams);
+
+ String json = StageUtils.getGson().toJson(executionCommand, ExecutionCommand.class);
+ ExecutionCommandWrapper execCommWrap = new ExecutionCommandWrapper(json);
+ injector.injectMembers(execCommWrap);
+
+ ExecutionCommand processedExecutionCommand = execCommWrap.getExecutionCommand();
+ commandParams = processedExecutionCommand.getCommandParams();
+ Assert.assertFalse(commandParams.containsKey(KeyNames.VERSION));
+
+ // now try with a START command which should populate the version even
+ // though the state is INSTALLING
+ executionCommand = new ExecutionCommand();
+ commandParams = new HashMap<>();
+
+ executionCommand.setClusterName(CLUSTER1);
+ executionCommand.setTaskId(1);
+ executionCommand.setRequestAndStage(1, 1);
+ executionCommand.setHostname(HOST1);
+ executionCommand.setRole("NAMENODE");
+ executionCommand.setRoleParams(Collections.<String, String> emptyMap());
+ executionCommand.setRoleCommand(RoleCommand.START);
+ executionCommand.setServiceName("HDFS");
+ executionCommand.setCommandType(AgentCommandType.EXECUTION_COMMAND);
+ executionCommand.setCommandParams(commandParams);
+
+ json = StageUtils.getGson().toJson(executionCommand, ExecutionCommand.class);
+ execCommWrap = new ExecutionCommandWrapper(json);
+ injector.injectMembers(execCommWrap);
+
+ processedExecutionCommand = execCommWrap.getExecutionCommand();
+ commandParams = processedExecutionCommand.getCommandParams();
+ Assert.assertEquals("0.1-0000", commandParams.get(KeyNames.VERSION));
+ }
+
@AfterClass
public static void tearDown() throws AmbariException, SQLException {
H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector);