You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by nc...@apache.org on 2017/11/13 16:57:33 UTC
[16/51] [abbrv] ambari git commit: AMBARI-22360. Send
ClusterVersionSummary to install_packages and conf-select exclusion (ncole)
AMBARI-22360. Send ClusterVersionSummary to install_packages and conf-select exclusion (ncole)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1afee609
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1afee609
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1afee609
Branch: refs/heads/branch-feature-AMBARI-21674
Commit: 1afee6096639b26ed74178bcdd2b6984fda42b32
Parents: 1fefbba
Author: Nate Cole <nc...@hortonworks.com>
Authored: Mon Nov 6 10:11:21 2017 -0500
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Mon Nov 6 12:16:26 2017 -0500
----------------------------------------------------------------------
.../libraries/functions/conf_select.py | 58 +++++
.../ambari/server/agent/ExecutionCommand.java | 34 ++-
.../ClusterStackVersionResourceProvider.java | 3 +-
.../HostStackVersionResourceProvider.java | 2 +-
.../state/repository/ClusterVersionSummary.java | 3 +
.../state/repository/ServiceVersionSummary.java | 11 +-
.../state/repository/VersionDefinitionXml.java | 2 +-
.../stack/upgrade/RepositoryVersionHelper.java | 28 ++-
.../custom_actions/scripts/install_packages.py | 12 +-
.../HDP/2.0.6/properties/stack_packages.json | 101 ++++++++
.../HDP/3.0/properties/stack_packages.json | 108 ++++++++-
...ClusterStackVersionResourceProviderTest.java | 239 +++++++++++++++++++
.../stacks/2.2/common/test_conf_select.py | 14 +-
13 files changed, 600 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/1afee609/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index c89e767..d38e273 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -294,6 +294,64 @@ def convert_conf_directories_to_symlinks(package, version, dirs):
Logger.warning("Could not change symlink for package {0} to point to current directory. Error: {1}".format(package, e))
+def get_restricted_packages():
+ """
+ Gets the list of conf-select 'package' names that need to be invoked on the command.
+ When the server passes down the list of packages to install, check the service names
+ and use the information in stack_packages json to determine the list of packages that should
+ be executed. This is valid only for PATCH or MAINT upgrades; STANDARD upgrades should
+ conf-select everything they can find.
+ """
+ package_names = []
+
+ # shortcut the common case if we are not patching
+ cluster_version_summary = default("/roleParameters/cluster_version_summary/services", None)
+
+ if cluster_version_summary is None:
+ Logger.info("Cluster Summary is not available, there are no restrictions for conf-select")
+ return package_names
+
+ service_names = []
+
+ # pick out the services that are targeted
+ for servicename, servicedetail in cluster_version_summary.iteritems():
+ if servicedetail['upgrade']:
+ service_names.append(servicename)
+
+ if 0 == len(service_names):
+ Logger.info("No services found, there are no restrictions for conf-select")
+ return package_names
+
+ stack_name = default("/hostLevelParams/stack_name", None)
+ if stack_name is None:
+ Logger.info("The stack name is not present in the command. Restricted names skipped.")
+ return package_names
+
+ stack_packages_config = default("/configurations/cluster-env/stack_packages", None)
+ if stack_packages_config is None:
+ Logger.info("The stack packages are not defined on the command. Restricted names skipped.")
+ return package_names
+
+ data = json.loads(stack_packages_config)
+
+ if stack_name not in data:
+ Logger.info("Cannot find conf-select packages for the {0} stack".format(stack_name))
+ return package_names
+
+ conf_select_key = "conf-select-patching"
+ if conf_select_key not in data[stack_name]:
+ Logger.info("There are no conf-select-patching elements defined for this command for the {0} stack".format(stack_name))
+ return package_names
+
+ service_dict = data[stack_name][conf_select_key]
+
+ for servicename in service_names:
+ if servicename in service_dict and 'packages' in service_dict[servicename]:
+ package_names.extend(service_dict[servicename]['packages'])
+
+ return package_names
+
+
def _seed_new_configuration_directories(package, created_directories):
"""
Copies any files from the "current" configuration directory to the directories which were
http://git-wip-us.apache.org/repos/asf/ambari/blob/1afee609/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index 9198164..e475c05 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -160,6 +160,9 @@ public class ExecutionCommand extends AgentCommand {
@SerializedName("upgradeSummary")
private UpgradeSummary upgradeSummary;
+ @SerializedName("roleParameters")
+ private Map<String, Object> roleParameters;
+
public void setConfigurationCredentials(Map<String, Map<String, String>> configurationCredentials) {
this.configurationCredentials = configurationCredentials;
}
@@ -235,6 +238,10 @@ public class ExecutionCommand extends AgentCommand {
return roleParams;
}
+ /**
+ * Sets the roleParams for the command. Consider instead using {@link #setRoleParameters}
+ * @param roleParams
+ */
public void setRoleParams(Map<String, String> roleParams) {
this.roleParams = roleParams;
}
@@ -332,11 +339,11 @@ public class ExecutionCommand extends AgentCommand {
}
public String getServiceType() {
- return serviceType;
+ return serviceType;
}
public void setServiceType(String serviceType) {
- this.serviceType = serviceType;
+ this.serviceType = serviceType;
}
/**
@@ -413,6 +420,23 @@ public class ExecutionCommand extends AgentCommand {
}
/**
+ * Gets the object-based role parameters for the command.
+ */
+ public Map<String, Object> getRoleParameters() {
+ return roleParameters;
+ }
+
+ /**
+ * Sets the role parameters for the command. This is preferred over {@link #setRoleParams(Map)},
+ * as this form will pass values as structured data, as opposed to unstructured, escaped json.
+ *
+ * @param params
+ */
+ public void setRoleParameters(Map<String, Object> params) {
+ roleParameters = params;
+ }
+
+ /**
* Contains key name strings. These strings are used inside maps
encapsulated inside command.
*/
@@ -512,6 +536,12 @@ public class ExecutionCommand extends AgentCommand {
feature = ExperimentalFeature.PATCH_UPGRADES,
comment = "Change this to reflect the component version")
String VERSION = "version";
+
+
+ /**
+ * When installing packages, includes what services will be included in the upgrade
+ */
+ String CLUSTER_VERSION_SUMMARY = "cluster_version_summary";
}
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/1afee609/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 969fca1..98adcd1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -730,7 +730,6 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
return null;
}
-
Map<String, String> roleParams = repoVersionHelper.buildRoleParams(managementController, repoVersion,
osFamily, servicesOnHost);
@@ -744,7 +743,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
actionContext.setRepositoryVersion(repoVersion);
actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
- repoVersionHelper.addCommandRepository(actionContext, repoVersion, osEntity);
+ repoVersionHelper.addCommandRepository(actionContext, cluster, repoVersion, osEntity);
return actionContext;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/1afee609/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
index ba5fccc..b106209 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
@@ -443,7 +443,7 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
roleParams);
actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
- repoVersionHelper.addCommandRepository(actionContext, repoVersionEnt, osEntity);
+ repoVersionHelper.addCommandRepository(actionContext, cluster, repoVersionEnt, osEntity);
String caption = String.format(INSTALL_PACKAGES_FULL_NAME + " on host %s", hostName);
RequestStageContainer req = createRequest(caption);
http://git-wip-us.apache.org/repos/asf/ambari/blob/1afee609/ambari-server/src/main/java/org/apache/ambari/server/state/repository/ClusterVersionSummary.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/ClusterVersionSummary.java b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/ClusterVersionSummary.java
index e9d9920..5486ecd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/ClusterVersionSummary.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/ClusterVersionSummary.java
@@ -24,11 +24,14 @@ import java.util.Set;
import org.codehaus.jackson.annotate.JsonIgnore;
import org.codehaus.jackson.annotate.JsonProperty;
+import com.google.gson.annotations.SerializedName;
+
/**
* For a version, collects summary information for a cluster.
*/
public class ClusterVersionSummary {
+ @SerializedName("services")
@JsonProperty("services")
private Map<String, ServiceVersionSummary> m_services;
http://git-wip-us.apache.org/repos/asf/ambari/blob/1afee609/ambari-server/src/main/java/org/apache/ambari/server/state/repository/ServiceVersionSummary.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/ServiceVersionSummary.java b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/ServiceVersionSummary.java
index 29505c8..d87caef 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/ServiceVersionSummary.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/ServiceVersionSummary.java
@@ -20,25 +20,26 @@ package org.apache.ambari.server.state.repository;
import org.codehaus.jackson.annotate.JsonIgnore;
import org.codehaus.jackson.annotate.JsonProperty;
+import com.google.gson.annotations.SerializedName;
+
/**
* Used to hold information about Service's ability to upgrade for a repository version.
*/
public class ServiceVersionSummary {
- @JsonProperty("display_name")
- private String m_displayName;
-
+ @SerializedName("version")
@JsonProperty("version")
private String m_version;
+ @SerializedName("release_version")
@JsonProperty("release_version")
private String m_releaseVersion;
+ @SerializedName("upgrade")
@JsonProperty("upgrade")
private boolean m_upgrade = false;
- ServiceVersionSummary(String displayName) {
- m_displayName = displayName;
+ ServiceVersionSummary() {
}
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/1afee609/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
index 62f78de..a519d00 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
@@ -300,7 +300,7 @@ public class VersionDefinitionXml {
continue;
}
- ServiceVersionSummary summary = new ServiceVersionSummary(service.getDisplayName());
+ ServiceVersionSummary summary = new ServiceVersionSummary();
summaries.put(service.getName(), summary);
String serviceVersion = service.getDesiredRepositoryVersion().getVersion();
http://git-wip-us.apache.org/repos/asf/ambari/blob/1afee609/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/RepositoryVersionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/RepositoryVersionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/RepositoryVersionHelper.java
index 8f34613..87943d1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/RepositoryVersionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/RepositoryVersionHelper.java
@@ -41,10 +41,14 @@ import org.apache.ambari.server.controller.spi.SystemException;
import org.apache.ambari.server.orm.entities.OperatingSystemEntity;
import org.apache.ambari.server.orm.entities.RepositoryEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.RepositoryInfo;
+import org.apache.ambari.server.state.RepositoryType;
import org.apache.ambari.server.state.ServiceInfo;
import org.apache.ambari.server.state.ServiceOsSpecific;
import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.repository.ClusterVersionSummary;
+import org.apache.ambari.server.state.repository.VersionDefinitionXml;
import org.apache.ambari.server.state.stack.UpgradePack;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
@@ -312,7 +316,7 @@ public class RepositoryVersionHelper {
* @param osEntity the OS family
* @param repoVersion the repository version entity
*/
- public void addCommandRepository(ActionExecutionContext context,
+ public void addCommandRepository(ActionExecutionContext context, Cluster cluster,
RepositoryVersionEntity repoVersion, OperatingSystemEntity osEntity) {
final CommandRepository commandRepo = new CommandRepository();
@@ -344,12 +348,34 @@ public class RepositoryVersionHelper {
commandRepo.getFeature().setIsScoped(false);
}
+ ClusterVersionSummary summary = null;
+ if (RepositoryType.STANDARD != repoVersion.getType()) {
+ try {
+ VersionDefinitionXml xml = repoVersion.getRepositoryXml();
+ summary = xml.getClusterSummary(cluster);
+ } catch (Exception e) {
+ LOG.warn("Could not determine repository from %s/%s. Will not pass cluster version.");
+ }
+ }
+
+ final ClusterVersionSummary clusterSummary = summary;
+
context.addVisitor(new ExecutionCommandVisitor() {
@Override
public void visit(ExecutionCommand command) {
if (null == command.getRepositoryFile()) {
command.setRepositoryFile(commandRepo);
}
+
+ if (null != clusterSummary) {
+ Map<String, Object> params = command.getRoleParameters();
+ if (null == params) {
+ params = new HashMap<>();
+ command.setRoleParameters(params);
+ }
+ params.put(KeyNames.CLUSTER_VERSION_SUMMARY, clusterSummary);
+ }
+
}
});
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/1afee609/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
index 47f9a74..fff18bb 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
@@ -179,8 +179,18 @@ class InstallPackages(Script):
and not sudo.path_exists("/usr/bin/conf-select") and sudo.path_exists("/usr/bin/hdfconf-select"):
Link("/usr/bin/conf-select", to="/usr/bin/hdfconf-select")
+
+ restricted_packages = conf_select.get_restricted_packages()
+
+ if 0 == len(restricted_packages):
+ Logger.info("There are no restricted conf-select packages for this installation")
+ else:
+ Logger.info("Restricting conf-select packages to {0}".format(restricted_packages))
+
for package_name, directories in conf_select.get_package_dirs().iteritems():
- conf_select.convert_conf_directories_to_symlinks(package_name, stack_version, directories)
+ if 0 == len(restricted_packages) or package_name in restricted_packages:
+ conf_select.convert_conf_directories_to_symlinks(package_name, stack_version, directories)
+
def compute_actual_version(self):
"""
http://git-wip-us.apache.org/repos/asf/ambari/blob/1afee609/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_packages.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_packages.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_packages.json
index 20b12a9..23c6d32 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_packages.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_packages.json
@@ -994,6 +994,12 @@
"current_dir": "{0}/current/atlas-client/conf"
}
],
+ "beacon": [
+ {
+ "conf_dir": "/etc/beacon/conf",
+ "current_dir": "{0}/current/beacon-client/conf"
+ }
+ ],
"druid": [
{
"conf_dir": "/etc/druid/conf",
@@ -1060,6 +1066,12 @@
"current_dir": "{0}/current/knox-server/conf"
}
],
+ "livy": [
+ {
+ "conf_dir": "/etc/livy/conf",
+ "current_dir": "{0}/current/livy"
+ }
+ ],
"mahout": [
{
"conf_dir": "/etc/mahout/conf",
@@ -1175,6 +1187,95 @@
}
]
},
+ "conf-select-patching": {
+ "ACCUMULO": {
+ "packages": ["accumulo"]
+ },
+ "ATLAS": {
+ "packages": ["atlas"]
+ },
+ "BEACON": {
+ "packages": ["beacon"]
+ },
+ "DRUID": {
+ "packages": ["druid", "superset"]
+ },
+ "FALCON": {
+ "packages": ["falcon"]
+ },
+ "FLUME": {
+ "packages": ["flume"]
+ },
+ "HBASE": {
+ "packages": ["hbase"]
+ },
+ "HDFS": {
+ "packages": []
+ },
+ "HIVE": {
+ "packages": ["hive", "hive-hcatalog", "hive2", "tez_hive2"]
+ },
+ "KAFKA": {
+ "packages": ["kafka"]
+ },
+ "KNOX": {
+ "packages": ["knox"]
+ },
+ "MAHOUT": {
+ "packages": ["mahout"]
+ },
+ "MAPREDUCE2": {
+ "packages": []
+ },
+ "OOZIE": {
+ "packages": ["oozie"]
+ },
+ "PIG": {
+ "packages": ["pig"]
+ },
+ "R4ML": {
+ "packages": []
+ },
+ "RANGER": {
+ "packages": ["ranger-admin", "ranger-usersync", "ranger-tagsync"]
+ },
+ "RANGER_KMS": {
+ "packages": ["ranger-kms"]
+ },
+ "SLIDER": {
+ "packages": ["slider"]
+ },
+ "SPARK": {
+ "packages": ["spark", "livy"]
+ },
+ "SPARK2": {
+ "packages": ["spark2", "livy"]
+ },
+ "SQOOP": {
+ "packages": ["sqoop"]
+ },
+ "STORM": {
+ "packages": ["storm", "storm-slider-client"]
+ },
+ "SYSTEMML": {
+ "packages": []
+ },
+ "TEZ": {
+ "packages": ["tez"]
+ },
+ "TITAN": {
+ "packages": []
+ },
+ "YARN": {
+ "packages": []
+ },
+ "ZEPPELIN": {
+ "packages": ["zeppelin"]
+ },
+ "ZOOKEEPER": {
+ "packages": ["zookeeper"]
+ }
+ },
"upgrade-dependencies" : {
"YARN": ["TEZ"],
"TEZ": ["YARN"],
http://git-wip-us.apache.org/repos/asf/ambari/blob/1afee609/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_packages.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_packages.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_packages.json
index be4c718..d8cd015 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_packages.json
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_packages.json
@@ -871,6 +871,12 @@
"current_dir": "{0}/current/atlas-client/conf"
}
],
+ "beacon": [
+ {
+ "conf_dir": "/etc/beacon/conf",
+ "current_dir": "{0}/current/beacon-client/conf"
+ }
+ ],
"druid": [
{
"conf_dir": "/etc/druid/conf",
@@ -937,6 +943,12 @@
"current_dir": "{0}/current/knox-server/conf"
}
],
+ "livy": [
+ {
+ "conf_dir": "/etc/livy/conf",
+ "current_dir": "{0}/current/livy"
+ }
+ ],
"mahout": [
{
"conf_dir": "/etc/mahout/conf",
@@ -1051,6 +1063,100 @@
"current_dir": "{0}/current/zookeeper-client/conf"
}
]
- }
+ },
+ "conf-select-patching": {
+ "ACCUMULO": {
+ "packages": ["accumulo"]
+ },
+ "ATLAS": {
+ "packages": ["atlas"]
+ },
+ "BEACON": {
+ "packages": ["beacon"]
+ },
+ "DRUID": {
+ "packages": ["druid", "superset"]
+ },
+ "FALCON": {
+ "packages": ["falcon"]
+ },
+ "FLUME": {
+ "packages": ["flume"]
+ },
+ "HBASE": {
+ "packages": ["hbase"]
+ },
+ "HDFS": {
+ "packages": []
+ },
+ "HIVE": {
+ "packages": ["hive", "hive-hcatalog", "hive2", "tez_hive2"]
+ },
+ "KAFKA": {
+ "packages": ["kafka"]
+ },
+ "KNOX": {
+ "packages": ["knox"]
+ },
+ "MAHOUT": {
+ "packages": ["mahout"]
+ },
+ "MAPREDUCE2": {
+ "packages": []
+ },
+ "OOZIE": {
+ "packages": ["oozie"]
+ },
+ "PIG": {
+ "packages": ["pig"]
+ },
+ "R4ML": {
+ "packages": []
+ },
+ "RANGER": {
+ "packages": ["ranger-admin", "ranger-usersync", "ranger-tagsync"]
+ },
+ "RANGER_KMS": {
+ "packages": ["ranger-kms"]
+ },
+ "SLIDER": {
+ "packages": ["slider"]
+ },
+ "SPARK": {
+ "packages": ["spark", "livy"]
+ },
+ "SPARK2": {
+ "packages": ["spark2", "livy"]
+ },
+ "SQOOP": {
+ "packages": ["sqoop"]
+ },
+ "STORM": {
+ "packages": ["storm", "storm-slider-client"]
+ },
+ "SYSTEMML": {
+ "packages": []
+ },
+ "TEZ": {
+ "packages": ["tez"]
+ },
+ "TITAN": {
+ "packages": []
+ },
+ "YARN": {
+ "packages": []
+ },
+ "ZEPPELIN": {
+ "packages": ["zeppelin"]
+ },
+ "ZOOKEEPER": {
+ "packages": ["zookeeper"]
+ }
+ },
+ "upgrade-dependencies" : {
+ "YARN": ["TEZ"],
+ "TEZ": ["YARN"],
+ "MAHOUT": ["MAPREDUCE2"]
+ }
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/1afee609/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
index 9840106..f45ff75 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
@@ -99,6 +99,7 @@ import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.StackInfo;
import org.apache.ambari.server.state.cluster.ClusterImpl;
import org.apache.ambari.server.state.repository.AvailableService;
+import org.apache.ambari.server.state.repository.ClusterVersionSummary;
import org.apache.ambari.server.state.repository.VersionDefinitionXml;
import org.apache.ambari.server.state.stack.upgrade.Direction;
import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
@@ -1883,6 +1884,244 @@ public class ClusterStackVersionResourceProviderTest {
}
+ @Test
+ public void testCreateResourcesPatch() throws Exception {
+ JsonArray json = new JsonParser().parse(OS_JSON).getAsJsonArray();
+
+ JsonObject jsonObj = json.get(0).getAsJsonObject();
+ jsonObj.addProperty(OperatingSystemResourceProvider.OPERATING_SYSTEM_AMBARI_MANAGED_REPOS, false);
+
+ String os_json = json.toString();
+
+ Cluster cluster = createNiceMock(Cluster.class);
+ StackId stackId = new StackId("HDP", "2.0.1");
+
+ StackEntity fromStack = new StackEntity();
+ fromStack.setStackName(stackId.getStackName());
+ fromStack.setStackVersion(stackId.getStackVersion());
+
+ RepositoryVersionEntity fromRepo = new RepositoryVersionEntity();
+ fromRepo.setId(0L);
+ fromRepo.setOperatingSystems(os_json);
+ fromRepo.setVersionXsd("version_definition.xsd");
+ fromRepo.setType(RepositoryType.STANDARD);
+ fromRepo.setStack(fromStack);
+ fromRepo.setVersion("2.0.1.0-1234");
+
+
+ StackEntity stackEntity = new StackEntity();
+ stackEntity.setStackName("HDP");
+ stackEntity.setStackVersion("2.1.1");
+
+ File f = new File("src/test/resources/hbase_version_test.xml");
+ String xmlString = IOUtils.toString(new FileInputStream(f));
+ // hack to remove ZK
+ xmlString = xmlString.replace("<service idref=\"ZOOKEEPER-346\" />", "");
+
+
+ RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
+ repoVersion.setId(1l);
+ repoVersion.setOperatingSystems(os_json);
+ repoVersion.setVersionXml(xmlString);
+ repoVersion.setVersionXsd("version_definition.xsd");
+ repoVersion.setType(RepositoryType.PATCH);
+ repoVersion.setStack(stackEntity);
+
+ ambariMetaInfo.getComponent("HDP", "2.1.1", "HBASE", "HBASE_MASTER").setVersionAdvertised(true);
+ ambariMetaInfo.getComponent("HDP", "2.0.1", "HBASE", "HBASE_MASTER").setVersionAdvertised(true);
+
+ final String hostWithoutVersionableComponents = "host3";
+ List<Host> hostsNeedingInstallCommands = new ArrayList<>();
+
+ Map<String, Host> hostsForCluster = new HashMap<>();
+ int hostCount = 10;
+ for (int i = 0; i < hostCount; i++) {
+ String hostname = "host" + i;
+ Host host = createNiceMock(hostname, Host.class);
+ expect(host.getHostName()).andReturn(hostname).anyTimes();
+ expect(host.getOsFamily()).andReturn("redhat6").anyTimes();
+ expect(host.getMaintenanceState(EasyMock.anyLong())).andReturn(
+ MaintenanceState.OFF).anyTimes();
+ expect(host.getAllHostVersions()).andReturn(
+ Collections.<HostVersionEntity>emptyList()).anyTimes();
+ expect(host.getHostAttributes()).andReturn(new HashMap<String, String>()).anyTimes();
+ replay(host);
+ hostsForCluster.put(hostname, host);
+
+ if (StringUtils.equals(hostWithoutVersionableComponents, hostname)) {
+ hostsNeedingInstallCommands.add(host);
+ }
+ }
+
+ Service hdfsService = createNiceMock(Service.class);
+ expect(hdfsService.getName()).andReturn("HDFS").anyTimes();
+ expect(hdfsService.getServiceComponents()).andReturn(new HashMap<String, ServiceComponent>());
+ expect(hdfsService.getDesiredRepositoryVersion()).andReturn(fromRepo).anyTimes();
+
+ Service hbaseService = createNiceMock(Service.class);
+ expect(hbaseService.getName()).andReturn("HBASE").anyTimes();
+ expect(hbaseService.getServiceComponents()).andReturn(new HashMap<String, ServiceComponent>());
+ expect(hbaseService.getDesiredRepositoryVersion()).andReturn(fromRepo).anyTimes();
+
+ Map<String, Service> serviceMap = new HashMap<>();
+ serviceMap.put("HDFS", hdfsService);
+ serviceMap.put("HBASE", hbaseService);
+
+ final ServiceComponentHost schDatanode = createMock(ServiceComponentHost.class);
+ expect(schDatanode.getServiceName()).andReturn("HDFS").anyTimes();
+ expect(schDatanode.getServiceComponentName()).andReturn("DATANODE").anyTimes();
+
+ final ServiceComponentHost schNamenode = createMock(ServiceComponentHost.class);
+ expect(schNamenode.getServiceName()).andReturn("HDFS").anyTimes();
+ expect(schNamenode.getServiceComponentName()).andReturn("NAMENODE").anyTimes();
+
+ final ServiceComponentHost schHBM = createMock(ServiceComponentHost.class);
+ expect(schHBM.getServiceName()).andReturn("HBASE").anyTimes();
+ expect(schHBM.getServiceComponentName()).andReturn("HBASE_MASTER").anyTimes();
+
+ // First host contains versionable components
+ final List<ServiceComponentHost> schsH1 = Arrays.asList(schDatanode, schNamenode);
+
+ // Second host contains versionable components
+ final List<ServiceComponentHost> schsH2 = Arrays.asList(schDatanode);
+
+ // Third host only has hbase
+ final List<ServiceComponentHost> schsH3 = Arrays.asList(schHBM);
+
+ ServiceOsSpecific.Package hdfsPackage = new ServiceOsSpecific.Package();
+ hdfsPackage.setName("hdfs");
+
+ List<ServiceOsSpecific.Package> packages = Collections.singletonList(hdfsPackage);
+
+ ActionManager actionManager = createNiceMock(ActionManager.class);
+
+ RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
+ ResourceProvider csvResourceProvider = createNiceMock(ResourceProvider.class);
+
+ Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
+ expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
+
+ expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+ expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+ expect(managementController.getAuthName()).andReturn("admin").anyTimes();
+ expect(managementController.getActionManager()).andReturn(actionManager).anyTimes();
+ expect(managementController.getJdkResourceUrl()).andReturn("/JdkResourceUrl").anyTimes();
+ expect(managementController.getPackagesForServiceHost(anyObject(ServiceInfo.class),
+ EasyMock.<Map<String, String>>anyObject(), anyObject(String.class))).
+ andReturn(packages).anyTimes(); // only one host has the versionable component
+
+ expect(managementController.findConfigurationTagsWithOverrides(anyObject(Cluster.class), EasyMock.anyString()))
+ .andReturn(new HashMap<String, Map<String, String>>()).anyTimes();
+
+ expect(clusters.getCluster(anyObject(String.class))).andReturn(cluster);
+ expect(clusters.getHostsForCluster(anyObject(String.class))).andReturn(
+ hostsForCluster).anyTimes();
+
+ String clusterName = "Cluster100";
+ expect(cluster.getClusterId()).andReturn(1L).anyTimes();
+ expect(cluster.getHosts()).andReturn(hostsForCluster.values()).atLeastOnce();
+ expect(cluster.getServices()).andReturn(serviceMap).anyTimes();
+ expect(cluster.getCurrentStackVersion()).andReturn(stackId);
+ expect(cluster.getServiceComponentHosts(anyObject(String.class))).andAnswer(new IAnswer<List<ServiceComponentHost>>() {
+ @Override
+ public List<ServiceComponentHost> answer() throws Throwable {
+ String hostname = (String) EasyMock.getCurrentArguments()[0];
+ if (hostname.equals("host2")) {
+ return schsH2;
+ } else if (hostname.equals("host3")) {
+ return schsH3;
+ } else {
+ return schsH1;
+ }
+ }
+ }).anyTimes();
+
+ expect(cluster.transitionHostsToInstalling(anyObject(RepositoryVersionEntity.class),
+ anyObject(VersionDefinitionXml.class), eq(false))).andReturn(hostsNeedingInstallCommands).atLeastOnce();
+
+ ExecutionCommand executionCommand = new ExecutionCommand();
+ ExecutionCommandWrapper executionCommandWrapper = createNiceMock(ExecutionCommandWrapper.class);
+
+ expect(executionCommandWrapper.getExecutionCommand()).andReturn(executionCommand).anyTimes();
+
+ Stage stage = createNiceMock(Stage.class);
+ expect(stage.getExecutionCommandWrapper(anyObject(String.class), anyObject(String.class))).
+ andReturn(executionCommandWrapper).anyTimes();
+
+ Map<Role, Float> successFactors = new HashMap<>();
+ expect(stage.getSuccessFactors()).andReturn(successFactors).atLeastOnce();
+
+ // Check that we create proper stage count
+ expect(stageFactory.createNew(anyLong(), anyObject(String.class),
+ anyObject(String.class), anyLong(),
+ anyObject(String.class), anyObject(String.class),
+ anyObject(String.class))).andReturn(stage).
+ times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
+
+ expect(repositoryVersionDAOMock.findByStackAndVersion(anyObject(StackId.class),
+ anyObject(String.class))).andReturn(repoVersion);
+
+ Capture<org.apache.ambari.server.actionmanager.Request> c = Capture.newInstance();
+ Capture<ExecuteActionRequest> ear = Capture.newInstance();
+
+ actionManager.sendActions(capture(c), capture(ear));
+ expectLastCall().atLeastOnce();
+ expect(actionManager.getRequestTasks(anyLong())).andReturn(Collections.<HostRoleCommand>emptyList()).anyTimes();
+
+ ClusterEntity clusterEntity = new ClusterEntity();
+ clusterEntity.setClusterId(1l);
+ clusterEntity.setClusterName(clusterName);
+
+ StageUtils.setTopologyManager(injector.getInstance(TopologyManager.class));
+ StageUtils.setConfiguration(injector.getInstance(Configuration.class));
+
+ // replay
+ replay(managementController, response, clusters, hdfsService, hbaseService, csvResourceProvider,
+ cluster, repositoryVersionDAOMock, configHelper, schDatanode, schNamenode, schHBM, actionManager,
+ executionCommandWrapper,stage, stageFactory);
+
+ ResourceProvider provider = createProvider(managementController);
+ injector.injectMembers(provider);
+
+ // add the property map to a set for the request. add more maps for multiple creates
+ Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
+
+ Map<String, Object> properties = new LinkedHashMap<>();
+
+ // add properties to the request map
+ properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
+ properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID, "2.2.0.1-885");
+ properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, "HDP");
+ properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID, "2.1.1");
+
+ propertySet.add(properties);
+
+ // create the request
+ Request request = PropertyHelper.getCreateRequest(propertySet, null);
+
+ SecurityContextHolder.getContext().setAuthentication(TestAuthenticationFactory.createAdministrator());
+
+ RequestStatus status = provider.createResources(request);
+ Assert.assertNotNull(status);
+
+ // check that the success factor was populated in the stage
+ Float successFactor = successFactors.get(Role.INSTALL_PACKAGES);
+ Assert.assertEquals(Float.valueOf(0.85f), successFactor);
+
+ Assert.assertNotNull(executionCommand.getRepositoryFile());
+ Assert.assertNotNull(executionCommand.getRoleParameters());
+ Map<String, Object> roleParams = executionCommand.getRoleParameters();
+ Assert.assertTrue(roleParams.containsKey(KeyNames.CLUSTER_VERSION_SUMMARY));
+ Assert.assertEquals(ClusterVersionSummary.class,
+ roleParams.get(KeyNames.CLUSTER_VERSION_SUMMARY).getClass());
+
+ Assert.assertEquals(2, executionCommand.getRepositoryFile().getRepositories().size());
+ for (CommandRepository.Repository repo : executionCommand.getRepositoryFile().getRepositories()) {
+ Assert.assertFalse(repo.isAmbariManaged());
+ }
+
+ }
+
private ClusterStackVersionResourceProvider createProvider(AmbariManagementController amc) {
ResourceProviderFactory factory = injector.getInstance(ResourceProviderFactory.class);
AbstractControllerResourceProvider.init(factory);
http://git-wip-us.apache.org/repos/asf/ambari/blob/1afee609/ambari-server/src/test/python/stacks/2.2/common/test_conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_conf_select.py b/ambari-server/src/test/python/stacks/2.2/common/test_conf_select.py
index 92dd634..a199d00 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_conf_select.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_conf_select.py
@@ -185,4 +185,16 @@ class TestConfSelect(RMFTestCase):
conf_select.convert_conf_directories_to_symlinks("hadoop", "2.3.0.0-1234", packages["hadoop"])
- self.assertEqual(pprint.pformat(self.env.resource_list), "[]")
\ No newline at end of file
+ self.assertEqual(pprint.pformat(self.env.resource_list), "[]")
+
+
+ def test_restrictions(self):
+
+ Script.config.update({'roleParameters': {'cluster_version_summary': {'services': {'HIVE': {'upgrade': True}}}}})
+
+ restricted = conf_select.get_restricted_packages()
+ self.assertTrue("hive" in restricted)
+ self.assertTrue("hive-hcatalog" in restricted)
+ self.assertTrue("hive2" in restricted)
+ self.assertTrue("tez_hive2" in restricted)
+ self.assertTrue("hadoop" not in restricted)