You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by nc...@apache.org on 2017/07/13 19:14:52 UTC
[34/37] ambari git commit: AMBARI-21450. Initial cherry-picking for
feature branch (ncole)
http://git-wip-us.apache.org/repos/asf/ambari/blob/48f7fb22/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 9ea6083..93c02be 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -20,7 +20,6 @@ package org.apache.ambari.server.controller.internal;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
import java.util.ArrayList;
-import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
@@ -30,19 +29,15 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.Role;
import org.apache.ambari.server.StaticallyInject;
import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.actionmanager.HostRoleCommand;
-import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
-import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.actionmanager.RequestFactory;
import org.apache.ambari.server.actionmanager.Stage;
import org.apache.ambari.server.actionmanager.StageFactory;
-import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.agent.ExecutionCommand;
-import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.configuration.Configuration;
import org.apache.ambari.server.controller.ActionExecutionContext;
@@ -60,11 +55,8 @@ import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
import org.apache.ambari.server.controller.spi.SystemException;
import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
-import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
import org.apache.ambari.server.orm.dao.HostVersionDAO;
import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
-import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
import org.apache.ambari.server.orm.entities.HostVersionEntity;
import org.apache.ambari.server.orm.entities.OperatingSystemEntity;
import org.apache.ambari.server.orm.entities.RepositoryEntity;
@@ -72,30 +64,26 @@ import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.orm.entities.UpgradeEntity;
import org.apache.ambari.server.security.authorization.RoleAuthorization;
-import org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.ComponentInfo;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.MaintenanceState;
+import org.apache.ambari.server.state.RepositoryType;
import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceInfo;
-import org.apache.ambari.server.state.ServiceOsSpecific;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.repository.VersionDefinitionXml;
+import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
import org.apache.ambari.server.utils.StageUtils;
import org.apache.ambari.server.utils.VersionUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.math.NumberUtils;
import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
-import com.google.gson.Gson;
import com.google.inject.Inject;
-import com.google.inject.Injector;
import com.google.inject.Provider;
import com.google.inject.persist.Transactional;
@@ -115,7 +103,6 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
protected static final String CLUSTER_STACK_VERSION_HOST_STATES_PROPERTY_ID = PropertyHelper.getPropertyId("ClusterStackVersions", "host_states");
protected static final String CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId("ClusterStackVersions", "repository_version");
protected static final String CLUSTER_STACK_VERSION_STAGE_SUCCESS_FACTOR = PropertyHelper.getPropertyId("ClusterStackVersions", "success_factor");
- protected static final String CLUSTER_STACK_VERSION_IGNORE_PACKAGE_DEPENDENCIES = PropertyHelper.getPropertyId("ClusterStackVersions", KeyNames.IGNORE_PACKAGE_DEPENDENCIES);
/**
* Forces the {@link HostVersionEntity}s to a specific
@@ -129,7 +116,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
protected static final String CLUSTER_STACK_VERSION_FORCE = "ClusterStackVersions/force";
protected static final String INSTALL_PACKAGES_ACTION = "install_packages";
- protected static final String INSTALL_PACKAGES_FULL_NAME = "Install version";
+ protected static final String INSTALL_PACKAGES_FULL_NAME = "Install Version";
/**
* The default success factor that will be used when determining if a stage's
@@ -153,7 +140,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, CLUSTER_STACK_VERSION_STACK_PROPERTY_ID,
CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID, CLUSTER_STACK_VERSION_HOST_STATES_PROPERTY_ID,
CLUSTER_STACK_VERSION_STATE_PROPERTY_ID, CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID,
- CLUSTER_STACK_VERSION_STAGE_SUCCESS_FACTOR, CLUSTER_STACK_VERSION_IGNORE_PACKAGE_DEPENDENCIES, CLUSTER_STACK_VERSION_FORCE);
+ CLUSTER_STACK_VERSION_STAGE_SUCCESS_FACTOR, CLUSTER_STACK_VERSION_FORCE);
private static Map<Type, String> keyPropertyIds = ImmutableMap.<Type, String> builder()
.put(Type.Cluster, CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID)
@@ -164,21 +151,12 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
.build();
@Inject
- private static ClusterVersionDAO clusterVersionDAO;
-
- @Inject
private static HostVersionDAO hostVersionDAO;
@Inject
private static RepositoryVersionDAO repositoryVersionDAO;
@Inject
- private static HostRoleCommandFactory hostRoleCommandFactory;
-
- @Inject
- private static Gson gson;
-
- @Inject
private static Provider<AmbariActionExecutionHelper> actionExecutionHelper;
@Inject
@@ -191,24 +169,12 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
private static Configuration configuration;
@Inject
- private static Injector injector;
+ private static RepositoryVersionHelper repoVersionHelper;
- @Inject
- private static HostComponentStateDAO hostComponentStateDAO;
- /**
- * Used for updating the existing stack tools with those of the stack being
- * distributed.
- */
- @Inject
- private static Provider<ConfigHelper> configHelperProvider;
- /**
- * We have to include such a hack here, because if we
- * make finalizeUpgradeAction field static and request injection
- * for it, there will be a circle dependency error
- */
- private FinalizeUpgradeAction finalizeUpgradeAction = injector.getInstance(FinalizeUpgradeAction.class);
+ @Inject
+ private static Provider<Clusters> clusters;
/**
* Constructor.
@@ -223,70 +189,87 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
}
@Override
+ @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES,
+ comment = "this is a fake response until the UI no longer uses the endpoint")
public Set<Resource> getResourcesAuthorized(Request request, Predicate predicate) throws
SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
final Set<Resource> resources = new HashSet<>();
+
final Set<String> requestedIds = getRequestPropertyIds(request, predicate);
final Set<Map<String, Object>> propertyMaps = getPropertyMaps(predicate);
- List<ClusterVersionEntity> requestedEntities = new ArrayList<>();
- for (Map<String, Object> propertyMap: propertyMaps) {
- final String clusterName = propertyMap.get(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID).toString();
- final Long id;
- if (propertyMap.get(CLUSTER_STACK_VERSION_ID_PROPERTY_ID) == null && propertyMaps.size() == 1) {
- requestedEntities = clusterVersionDAO.findByCluster(clusterName);
- } else {
- try {
- id = Long.parseLong(propertyMap.get(CLUSTER_STACK_VERSION_ID_PROPERTY_ID).toString());
- } catch (Exception ex) {
- throw new SystemException("Stack version should have numerical id");
- }
- final ClusterVersionEntity entity = clusterVersionDAO.findByPK(id);
- if (entity == null) {
- throw new NoSuchResourceException("There is no stack version with id " + id);
- } else {
- requestedEntities.add(entity);
- }
+ if (1 != propertyMaps.size()) {
+ throw new SystemException("Cannot request more than one resource");
+ }
+
+ Map<String, Object> propertyMap = propertyMaps.iterator().next();
+
+ String clusterName = propertyMap.get(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID).toString();
+ final Cluster cluster;
+ try {
+ cluster = clusters.get().getCluster(clusterName);
+ } catch (AmbariException e) {
+ throw new SystemException(e.getMessage(), e);
+ }
+
+ Set<Long> requestedEntities = new HashSet<>();
+
+ if (propertyMap.containsKey(CLUSTER_STACK_VERSION_ID_PROPERTY_ID)) {
+ Long id = Long.parseLong(propertyMap.get(CLUSTER_STACK_VERSION_ID_PROPERTY_ID).toString());
+ requestedEntities.add(id);
+ } else {
+ List<RepositoryVersionEntity> entities = repositoryVersionDAO.findAll();
+
+ for (RepositoryVersionEntity entity : entities) {
+ requestedEntities.add(entity.getId());
}
}
- for (ClusterVersionEntity entity: requestedEntities) {
+ if (requestedEntities.isEmpty()) {
+ throw new SystemException("Could not find any repositories to show");
+ }
+
+
+ for (Long repositoryVersionId : requestedEntities) {
final Resource resource = new ResourceImpl(Resource.Type.ClusterStackVersion);
- final Map<String, List<String>> hostStates = new HashMap<>();
+ RepositoryVersionEntity repositoryVersion = repositoryVersionDAO.findByPK(repositoryVersionId);
+
+ final List<RepositoryVersionState> allStates = new ArrayList<>();
+ final Map<RepositoryVersionState, List<String>> hostStates = new HashMap<>();
for (RepositoryVersionState state: RepositoryVersionState.values()) {
- hostStates.put(state.name(), new ArrayList<String>());
+ hostStates.put(state, new ArrayList<String>());
}
- StackEntity repoVersionStackEntity = entity.getRepositoryVersion().getStack();
+ StackEntity repoVersionStackEntity = repositoryVersion.getStack();
StackId repoVersionStackId = new StackId(repoVersionStackEntity);
- for (HostVersionEntity hostVersionEntity : hostVersionDAO.findByClusterStackAndVersion(
- entity.getClusterEntity().getClusterName(), repoVersionStackId,
- entity.getRepositoryVersion().getVersion())) {
+ List<HostVersionEntity> hostVersionsForRepository = hostVersionDAO.findHostVersionByClusterAndRepository(
+ cluster.getClusterId(), repositoryVersion);
- hostStates.get(hostVersionEntity.getState().name()).add(hostVersionEntity.getHostName());
+ // create the in-memory structures
+ for (HostVersionEntity hostVersionEntity : hostVersionsForRepository) {
+ hostStates.get(hostVersionEntity.getState()).add(hostVersionEntity.getHostName());
+ allStates.add(hostVersionEntity.getState());
}
- StackId stackId = new StackId(entity.getRepositoryVersion().getStack());
- RepositoryVersionEntity repoVerEntity = repositoryVersionDAO.findByStackAndVersion(
- stackId, entity.getRepositoryVersion().getVersion());
-
- setResourceProperty(resource, CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, entity.getClusterEntity().getClusterName(), requestedIds);
+ setResourceProperty(resource, CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, clusterName, requestedIds);
setResourceProperty(resource, CLUSTER_STACK_VERSION_HOST_STATES_PROPERTY_ID, hostStates, requestedIds);
- setResourceProperty(resource, CLUSTER_STACK_VERSION_ID_PROPERTY_ID, entity.getId(), requestedIds);
- setResourceProperty(resource, CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, stackId.getStackName(), requestedIds);
- setResourceProperty(resource, CLUSTER_STACK_VERSION_STATE_PROPERTY_ID, entity.getState().name(), requestedIds);
- setResourceProperty(resource, CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID, stackId.getStackVersion(), requestedIds);
- if (repoVerEntity!=null) {
- Long repoVersionId = repoVerEntity.getId();
- setResourceProperty(resource, CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID, repoVersionId, requestedIds);
- }
+ setResourceProperty(resource, CLUSTER_STACK_VERSION_ID_PROPERTY_ID, repositoryVersion.getId(), requestedIds);
+ setResourceProperty(resource, CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, repoVersionStackId.getStackName(), requestedIds);
+ setResourceProperty(resource, CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID, repoVersionStackId.getStackVersion(), requestedIds);
+ setResourceProperty(resource, CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID, repositoryVersion.getId(), requestedIds);
+
+ @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES,
+ comment = "this is a fake status until the UI can handle services that are on their own")
+ RepositoryVersionState aggregateState = RepositoryVersionState.getAggregateState(allStates);
+ setResourceProperty(resource, CLUSTER_STACK_VERSION_STATE_PROPERTY_ID, aggregateState, requestedIds);
if (predicate == null || predicate.evaluate(resource)) {
resources.add(resource);
}
}
+
return resources;
}
@@ -304,6 +287,8 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
String clName;
final String desiredRepoVersion;
+ String stackName;
+ String stackVersion;
Map<String, Object> propertyMap = iterator.next();
@@ -342,35 +327,35 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
cluster.getClusterName(), entity.getDirection().getText(false)));
}
- String stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
- String stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
- if (StringUtils.isBlank(stackName) || StringUtils.isBlank(stackVersion)) {
- String message = String.format(
- "Both the %s and %s properties are required when distributing a new stack",
- CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
-
- throw new SystemException(message);
+ Set<StackId> stackIds = new HashSet<>();
+ if (propertyMap.containsKey(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID) &&
+ propertyMap.containsKey(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID)) {
+ stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
+ stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+ StackId stackId = new StackId(stackName, stackVersion);
+ if (! ami.isSupportedStack(stackName, stackVersion)) {
+ throw new NoSuchParentResourceException(String.format("Stack %s is not supported",
+ stackId));
+ }
+ stackIds.add(stackId);
+ } else { // Using stack that is current for cluster
+ for (Service service : cluster.getServices().values()) {
+ stackIds.add(service.getDesiredStackId());
+ }
}
- final StackId stackId = new StackId(stackName, stackVersion);
-
- if (!ami.isSupportedStack(stackName, stackVersion)) {
- throw new NoSuchParentResourceException(String.format("Stack %s is not supported", stackId));
+ if (stackIds.size() > 1) {
+ throw new SystemException("Could not determine stack to add out of " + StringUtils.join(stackIds, ','));
}
- // bootstrap the stack tools if necessary for the stack which is being
- // distributed
- try {
- bootstrapStackTools(stackId, cluster);
- } catch (AmbariException ambariException) {
- throw new SystemException("Unable to modify stack tools for new stack being distributed",
- ambariException);
- }
+ StackId stackId = stackIds.iterator().next();
+ stackName = stackId.getStackName();
+ stackVersion = stackId.getStackVersion();
- RepositoryVersionEntity repoVersionEnt = repositoryVersionDAO.findByStackAndVersion(
+ RepositoryVersionEntity repoVersionEntity = repositoryVersionDAO.findByStackAndVersion(
stackId, desiredRepoVersion);
- if (repoVersionEnt == null) {
+ if (repoVersionEntity == null) {
throw new IllegalArgumentException(String.format(
"Repo version %s is not available for stack %s",
desiredRepoVersion, stackId));
@@ -378,130 +363,87 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
VersionDefinitionXml desiredVersionDefinition = null;
try {
- desiredVersionDefinition = repoVersionEnt.getRepositoryXml();
+ desiredVersionDefinition = repoVersionEntity.getRepositoryXml();
} catch (Exception e) {
throw new IllegalArgumentException(
String.format("Version %s is backed by a version definition, but it could not be parsed", desiredRepoVersion), e);
}
- // get all of the host eligible for stack distribution
- List<Host> hosts = getHostsForStackDistribution(cluster);
+ // if true, then we need to force all new host versions into the INSTALLED state
+ boolean forceInstalled = Boolean.parseBoolean((String)propertyMap.get(
+ CLUSTER_STACK_VERSION_FORCE));
- /*
- If there is a repository that is already ATTEMPTED to be installed and the version
- is GREATER than the one trying to install, we must fail (until we can support that via Patch Upgrades)
+ try {
+ // either create the necessary host version entries, or set them to INSTALLING when attempting to re-distribute an existing version
+ return createOrUpdateHostVersions(cluster, repoVersionEntity, desiredVersionDefinition,
+ stackId, forceInstalled, propertyMap);
+ } catch (AmbariException e) {
+ throw new SystemException("Can not persist request", e);
+ }
+ }
- For example:
+ @Transactional
+ RequestStatus createOrUpdateHostVersions(Cluster cluster,
+ RepositoryVersionEntity repoVersionEntity, VersionDefinitionXml versionDefinitionXml,
+ StackId stackId, boolean forceInstalled, Map<String, Object> propertyMap)
+ throws AmbariException, SystemException {
- 1. Install 2.3.0.0
- 2. Register and Install 2.5.0.0 (with or without package-version; it gets computed correctly)
- 3. Register 2.4 (without package-version)
+ final String desiredRepoVersion = repoVersionEntity.getVersion();
- Installation of 2.4 will fail because the way agents invoke installation is to
- install by name. if the package-version is not known, then the 'newest' is ALWAYS installed.
- In this case, 2.5.0.0. 2.4 is never picked up.
- */
- for (ClusterVersionEntity clusterVersion : clusterVersionDAO.findByCluster(clName)) {
- RepositoryVersionEntity clusterRepoVersion = clusterVersion.getRepositoryVersion();
+ // get all of the hosts eligible for stack distribution
+ List<Host> hosts = Lists.newArrayList(cluster.getHosts());
- int compare = compareVersions(clusterRepoVersion.getVersion(), desiredRepoVersion);
- // ignore earlier versions
- if (compare <= 0) {
- continue;
- }
+ for (Host host : hosts) {
+ for (HostVersionEntity hostVersion : host.getAllHostVersions()) {
+ RepositoryVersionEntity hostRepoVersion = hostVersion.getRepositoryVersion();
- // !!! the version is greater to the one to install
+ // !!! ignore stack differences
+ if (!hostRepoVersion.getStackName().equals(repoVersionEntity.getStackName())) {
+ continue;
+ }
- // if the stacks are different, then don't fail (further check same-stack version strings)
- if (!StringUtils.equals(clusterRepoVersion.getStackName(), repoVersionEnt.getStackName())) {
- continue;
- }
+ int compare = compareVersions(hostRepoVersion.getVersion(), desiredRepoVersion);
- // if there is no backing VDF for the desired version, allow the operation (legacy behavior)
- if (null == desiredVersionDefinition) {
- continue;
- }
+ // ignore earlier versions
+ if (compare <= 0) {
+ continue;
+ }
+
+ // !!! the version is greater to the one to install
+
+ // if there is no backing VDF for the desired version, allow the operation (legacy behavior)
+ if (null == versionDefinitionXml) {
+ continue;
+ }
- // backing VDF does not define the package version for any of the hosts, cannot install (allows a VDF with package-version)
- for (Host host : hosts) {
- if (StringUtils.isBlank(desiredVersionDefinition.getPackageVersion(host.getOsFamily()))) {
+ if (StringUtils.isBlank(versionDefinitionXml.getPackageVersion(host.getOsFamily()))) {
String msg = String.format("Ambari cannot install version %s. Version %s is already installed.",
- desiredRepoVersion, clusterRepoVersion.getVersion());
+ desiredRepoVersion, hostRepoVersion.getVersion());
throw new IllegalArgumentException(msg);
}
}
}
- // if true, then we need to force all new host versions into the INSTALLED state
- boolean forceInstalled = Boolean.parseBoolean((String)propertyMap.get(
- CLUSTER_STACK_VERSION_FORCE));
-
- final RequestStatusResponse response;
- try {
- if (forceInstalled) {
- createHostVersions(cluster, hosts, stackId, desiredRepoVersion, RepositoryVersionState.INSTALLED);
- response = null;
- } else {
- createHostVersions(cluster, hosts, stackId, desiredRepoVersion,
- RepositoryVersionState.INSTALLING);
+ // the cluster will create/update all of the host versions to the correct state
+ List<Host> hostsNeedingInstallCommands = cluster.transitionHostsToInstalling(
+ repoVersionEntity, versionDefinitionXml, forceInstalled);
- RequestStageContainer installRequest = createOrchestration(cluster, stackId, hosts,
- repoVersionEnt, propertyMap);
+ RequestStatusResponse response = null;
+ if (!forceInstalled) {
+ RequestStageContainer installRequest = createOrchestration(cluster, stackId,
+ hostsNeedingInstallCommands, repoVersionEntity, versionDefinitionXml, propertyMap);
- response = installRequest.getRequestStatusResponse();
- }
- } catch (AmbariException e) {
- throw new SystemException("Can not persist request", e);
+ response = installRequest.getRequestStatusResponse();
}
return getRequestStatus(response);
}
@Transactional
- void createHostVersions(Cluster cluster, List<Host> hosts, StackId stackId,
- String desiredRepoVersion, RepositoryVersionState repoState)
- throws AmbariException, SystemException {
- final String clusterName = cluster.getClusterName();
- final String authName = getManagementController().getAuthName();
-
- ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStackAndVersion(
- clusterName, stackId, desiredRepoVersion);
-
- if (clusterVersionEntity == null) {
- try {
- // Create/persist new cluster stack version
- cluster.createClusterVersion(stackId, desiredRepoVersion, authName, repoState);
-
- clusterVersionEntity = clusterVersionDAO.findByClusterAndStackAndVersion(clusterName,
- stackId, desiredRepoVersion);
- } catch (AmbariException e) {
- throw new SystemException(
- String.format("Can not create cluster stack version %s for cluster %s",
- desiredRepoVersion, clusterName), e);
- }
- } else {
- // Move cluster version into the specified state (retry installation)
- cluster.transitionClusterVersion(stackId, desiredRepoVersion, repoState);
- }
-
- // Will also initialize all Host Versions to the specified state state.
- cluster.transitionHosts(clusterVersionEntity, repoState);
-
- // Directly transition host versions to NOT_REQUIRED for hosts that don't
- // have versionable components
- for (Host host : hosts) {
- if (!host.hasComponentsAdvertisingVersions(stackId)) {
- transitionHostVersionToNotRequired(host, cluster,
- clusterVersionEntity.getRepositoryVersion());
- }
- }
- }
-
- @Transactional
RequestStageContainer createOrchestration(Cluster cluster, StackId stackId,
- List<Host> hosts, RepositoryVersionEntity repoVersionEnt, Map<String, Object> propertyMap)
+ List<Host> hosts, RepositoryVersionEntity repoVersionEnt, VersionDefinitionXml desiredVersionDefinition, Map<String, Object> propertyMap)
throws AmbariException, SystemException {
final AmbariManagementController managementController = getManagementController();
final AmbariMetaInfo ami = managementController.getAmbariMetaInfo();
@@ -550,9 +492,6 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
successFactor = Float.valueOf(successFactorProperty);
}
- Object ignorePackageDependenciesProperty = propertyMap.get(CLUSTER_STACK_VERSION_IGNORE_PACKAGE_DEPENDENCIES);
- String ignorePackageDependencies = Boolean.valueOf(String.valueOf(ignorePackageDependenciesProperty)).toString();
-
boolean hasStage = false;
ArrayList<Stage> stages = new ArrayList<>(batchCount);
@@ -585,8 +524,11 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
// determine services for the repo
Set<String> serviceNames = new HashSet<>();
- // !!! TODO for patch upgrades, we need to limit the serviceNames to those
- // that are detailed for the repository
+ // !!! limit the serviceNames to those that are detailed for the repository.
+ // TODO packages don't have component granularity
+ if (RepositoryType.STANDARD != repoVersionEnt.getType()) {
+ serviceNames.addAll(desiredVersionDefinition.getAvailableServiceNames());
+ }
// Populate with commands for host
for (int i = 0; i < maxTasks && hostIterator.hasNext(); i++) {
@@ -595,7 +537,6 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
ActionExecutionContext actionContext = getHostVersionInstallCommand(repoVersionEnt,
cluster, managementController, ami, stackId, serviceNames, perOsRepos, stage, host);
if (null != actionContext) {
- actionContext.getParameters().put(KeyNames.IGNORE_PACKAGE_DEPENDENCIES, ignorePackageDependencies);
try {
actionExecutionHelper.get().addExecutionCommandsToStage(actionContext, stage, null);
hasStage = true;
@@ -626,6 +567,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
throws SystemException {
// Determine repositories for host
String osFamily = host.getOsFamily();
+
final List<RepositoryEntity> repoInfo = perOsRepos.get(osFamily);
if (repoInfo == null) {
throw new SystemException(String.format("Repositories for os type %s are " +
@@ -633,9 +575,11 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
osFamily, repoVersion.getVersion(), stackId));
}
+ if (repoInfo.isEmpty()){
+ LOG.error(String.format("Repository list is empty. Ambari may not be managing the repositories for %s", osFamily));
+ }
// determine packages for all services that are installed on host
- List<ServiceOsSpecific.Package> packages = new ArrayList<>();
Set<String> servicesOnHost = new HashSet<>();
List<ServiceComponentHost> components = cluster.getServiceComponentHosts(host.getHostName());
for (ServiceComponentHost component : components) {
@@ -647,76 +591,25 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
if (servicesOnHost.isEmpty()) {
return null;
}
- List<String> blacklistedPackagePrefixes = configuration.getRollingUpgradeSkipPackagesPrefixes();
- for (String serviceName : servicesOnHost) {
- try{
- if(ami.isServiceRemovedInStack(stackId.getStackName(), stackId.getStackVersion(), serviceName)){
- LOG.info(String.format("%s has been removed from stack %s-%s. Skip calculating its installation packages", stackId.getStackName(), stackId.getStackVersion(), serviceName));
- continue; //No need to calculate install packages for removed services
- }
- } catch (AmbariException e1) {
- throw new SystemException(String.format("Cannot obtain stack information for %s-%s", stackId.getStackName(), stackId.getStackVersion()), e1);
- }
- ServiceInfo info;
- try {
- info = ami.getService(stackId.getStackName(), stackId.getStackVersion(), serviceName);
- } catch (AmbariException e) {
- throw new SystemException("Cannot enumerate services", e);
- }
- List<ServiceOsSpecific.Package> packagesForService = managementController.getPackagesForServiceHost(info,
- new HashMap<String, String>(), // Contents are ignored
- osFamily);
- for (ServiceOsSpecific.Package aPackage : packagesForService) {
- if (! aPackage.getSkipUpgrade()) {
- boolean blacklisted = false;
- for(String prefix : blacklistedPackagePrefixes) {
- if (aPackage.getName().startsWith(prefix)) {
- blacklisted = true;
- break;
- }
- }
- if (! blacklisted) {
- packages.add(aPackage);
- }
- }
- }
- }
-
- final String packageList = gson.toJson(packages);
- final String repoList = gson.toJson(repoInfo);
-
- Map<String, String> params = new HashMap<>();
- params.put("stack_id", stackId.getStackId());
- params.put("repository_version", repoVersion.getVersion());
- params.put("base_urls", repoList);
- params.put(KeyNames.PACKAGE_LIST, packageList);
- params.put(KeyNames.REPO_VERSION_ID, repoVersion.getId().toString());
-
- VersionDefinitionXml xml = null;
- try {
- xml = repoVersion.getRepositoryXml();
- } catch (Exception e) {
- throw new SystemException(String.format("Could not load xml from repo version %s",
- repoVersion.getVersion()));
- }
-
- if (null != xml && StringUtils.isNotBlank(xml.getPackageVersion(osFamily))) {
- params.put(KeyNames.PACKAGE_VERSION, xml.getPackageVersion(osFamily));
- }
+ Map<String, String> roleParams = repoVersionHelper.buildRoleParams(managementController, repoVersion,
+ osFamily, servicesOnHost);
// add host to this stage
RequestResourceFilter filter = new RequestResourceFilter(null, null,
Collections.singletonList(host.getHostName()));
- ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
- INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), params);
-
- actionContext.setStackId(stackId);
+ ActionExecutionContext actionContext = new ActionExecutionContext(
+ cluster.getClusterName(), INSTALL_PACKAGES_ACTION,
+ Collections.singletonList(filter),
+ roleParams);
actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
+ repoVersionHelper.addCommandRepository(actionContext, osFamily, repoVersion, repoInfo);
+
return actionContext;
+
}
@@ -752,29 +645,6 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
return false;
}
-
- /**
- * Sets host versions states to not-required.
- *
- * Transitioning host version to NOT_REQUIRED state manually is ok since
- * other completion handlers set success/fail states correctly during heartbeat.
- * The number of NOT_REQUIRED components for a cluster will be low.
- */
- private void transitionHostVersionToNotRequired(Host host, Cluster cluster, RepositoryVersionEntity repoVersion) {
- LOG.info(String.format("Transitioning version %s on host %s directly to %s" +
- " without distributing bits to host since it has no versionable components.",
- repoVersion.getVersion(), host.getHostName(), RepositoryVersionState.NOT_REQUIRED));
-
- for (HostVersionEntity hve : host.getAllHostVersions()) {
- if (hve.getRepositoryVersion().equals(repoVersion)) {
- hve.setState(RepositoryVersionState.NOT_REQUIRED);
- hostVersionDAO.merge(hve);
- }
- }
-
- }
-
-
private RequestStageContainer createRequest() {
ActionManager actionManager = getManagementController().getActionManager();
@@ -785,131 +655,6 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
return requestStages;
}
- /**
- * The only use of this method is to trigger Finalize during a
- * manual Stack Upgrade
- */
- @Override
- public RequestStatus updateResourcesAuthorized(Request request, Predicate predicate)
- throws SystemException, UnsupportedPropertyException,
- NoSuchResourceException, NoSuchParentResourceException {
- try {
- Iterator<Map<String, Object>> iterator = request.getProperties().iterator();
- String clName;
- final String desiredRepoVersion;
- if (request.getProperties().size() != 1) {
- throw new UnsupportedOperationException("Multiple requests cannot be executed at the same time.");
- }
- Map<String, Object> propertyMap = iterator.next();
-
- Set<String> requiredProperties = new HashSet<>();
- requiredProperties.add(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
- requiredProperties.add(CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID);
- requiredProperties.add(CLUSTER_STACK_VERSION_STATE_PROPERTY_ID);
-
- for (String requiredProperty : requiredProperties) {
- if (!propertyMap.containsKey(requiredProperty)) {
- throw new IllegalArgumentException(
- String.format("The required property %s is not defined",
- requiredProperty));
- }
- }
-
- clName = (String) propertyMap.get(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
- String desiredDisplayRepoVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID);
- RepositoryVersionEntity rve = repositoryVersionDAO.findByDisplayName(desiredDisplayRepoVersion);
- if (rve == null) {
- throw new IllegalArgumentException(
- String.format("Repository version with display name %s does not exist",
- desiredDisplayRepoVersion));
- }
- desiredRepoVersion = rve.getVersion();
- String newStateStr = (String) propertyMap.get(CLUSTER_STACK_VERSION_STATE_PROPERTY_ID);
-
- LOG.info("Initiating finalization for manual upgrade to version {} for cluster {}",
- desiredRepoVersion, clName);
-
- // First, set desired cluster stack version to enable cross-stack upgrade
- StackId stackId = rve.getStackId();
- Cluster cluster = getManagementController().getClusters().getCluster(clName);
- cluster.setDesiredStackVersion(stackId);
-
- String forceCurrent = (String) propertyMap.get(CLUSTER_STACK_VERSION_FORCE);
- boolean force = false;
- if (null != forceCurrent) {
- force = Boolean.parseBoolean(forceCurrent);
- }
-
- if (!force) {
- Map<String, String> args = new HashMap<>();
- if (newStateStr.equals(RepositoryVersionState.CURRENT.toString())) {
- // Finalize upgrade workflow
- args.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
- } else if (newStateStr.equals(RepositoryVersionState.INSTALLED.toString())) {
- // Finalize downgrade workflow
- args.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "downgrade");
- } else {
- throw new IllegalArgumentException(
- String.format("Invalid desired state %s. Should be either CURRENT or INSTALLED",
- newStateStr));
- }
-
- // Get a host name to populate the hostrolecommand table's hostEntity.
- String defaultHostName;
- ArrayList<Host> hosts = new ArrayList<>(cluster.getHosts());
- if (!hosts.isEmpty()) {
- Collections.sort(hosts);
- defaultHostName = hosts.get(0).getHostName();
- } else {
- throw new AmbariException("Could not find at least one host to set the command for");
- }
-
- args.put(FinalizeUpgradeAction.VERSION_KEY, desiredRepoVersion);
- args.put(FinalizeUpgradeAction.CLUSTER_NAME_KEY, clName);
-
- ExecutionCommand command = new ExecutionCommand();
- command.setCommandParams(args);
- command.setClusterName(clName);
- finalizeUpgradeAction.setExecutionCommand(command);
-
- HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(defaultHostName,
- Role.AMBARI_SERVER_ACTION, null, null);
- finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand);
-
- CommandReport report = finalizeUpgradeAction.execute(null);
-
- LOG.info("Finalize output:");
- LOG.info("STDOUT: {}", report.getStdOut());
- LOG.info("STDERR: {}", report.getStdErr());
-
- if (report.getStatus().equals(HostRoleStatus.COMPLETED.toString())) {
- return getRequestStatus(null);
- } else {
- String detailedOutput = "Finalization failed. More details: \n" +
- "STDOUT: " + report.getStdOut() + "\n" +
- "STDERR: " + report.getStdErr();
- throw new SystemException(detailedOutput);
- }
- } else {
- // !!! revisit for PU
- // If forcing to become CURRENT, get the Cluster Version whose state is CURRENT and make sure that
- // the Host Version records for the same Repo Version are also marked as CURRENT.
- ClusterVersionEntity current = cluster.getCurrentClusterVersion();
-
- if (!current.getRepositoryVersion().equals(rve)) {
- updateVersionStates(current.getClusterId(), current.getRepositoryVersion(), rve);
- }
-
-
- return getRequestStatus(null);
- }
- } catch (AmbariException e) {
- throw new SystemException("Cannot perform request", e);
- } catch (InterruptedException e) {
- throw new SystemException("Cannot perform request", e);
- }
- }
-
@Override
public RequestStatus deleteResourcesAuthorized(Request request, Predicate predicate)
throws SystemException, UnsupportedPropertyException,
@@ -923,44 +668,6 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
}
/**
- * Gets all of the hosts in a cluster which are not in "maintenance mode" and
- * are considered to be healthy. In the case of stack distribution, a host
- * must be explicitly marked as being in maintenance mode for it to be
- * considered as unhealthy.
- *
- * @param cluster
- * the cluster (not {@code null}).
- * @return the list of hosts that are not in maintenance mode and are
- * eligible to have a stack distributed to them.
- */
- private List<Host> getHostsForStackDistribution(Cluster cluster) {
- Collection<Host> hosts = cluster.getHosts();
- List<Host> healthyHosts = new ArrayList<>(hosts.size());
- for (Host host : hosts) {
- if (host.getMaintenanceState(cluster.getClusterId()) == MaintenanceState.OFF) {
- healthyHosts.add(host);
- }
- }
-
- return healthyHosts;
- }
-
- /**
- * Updates the version states. Transactional to ensure only one transaction for all updates
- * @param clusterId the cluster
- * @param current the repository that is current for the cluster
- * @param target the target repository
- */
- @Transactional
- protected void updateVersionStates(Long clusterId, RepositoryVersionEntity current,
- RepositoryVersionEntity target) {
-
- hostComponentStateDAO.updateVersions(target.getVersion());
- hostVersionDAO.updateVersions(target, current);
- clusterVersionDAO.updateVersions(clusterId, target, current);
- }
-
- /**
* Additional check over {@link VersionUtils#compareVersions(String, String)} that
* compares build numbers
*/
@@ -991,101 +698,4 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
}
- /**
- * Ensures that the stack tools and stack features are set on
- * {@link ConfigHelper#CLUSTER_ENV} for the stack of the repository being
- * distributed. This step ensures that the new repository can be distributed
- * with the correct tools.
- * <p/>
- * If the cluster's current stack name matches that of the new stack or the
- * new stack's tools are already added in the configuration, then this method
- * will not change anything.
- *
- * @param stackId
- * the stack of the repository being distributed (not {@code null}).
- * @param cluster
- * the cluster the new stack/repo is being distributed for (not
- * {@code null}).
- * @throws AmbariException
- */
- private void bootstrapStackTools(StackId stackId, Cluster cluster) throws AmbariException {
- // if the stack name is the same as the cluster's current stack name, then
- // there's no work to do
- if (StringUtils.equals(stackId.getStackName(),
- cluster.getCurrentStackVersion().getStackName())) {
- return;
- }
-
- ConfigHelper configHelper = configHelperProvider.get();
-
- // get the stack tools/features for the stack being distributed
- Map<String, Map<String, String>> defaultStackConfigurationsByType = configHelper.getDefaultProperties(
- stackId, cluster);
-
- Map<String, String> clusterEnvDefaults = defaultStackConfigurationsByType.get(
- ConfigHelper.CLUSTER_ENV);
-
- Config clusterEnv = cluster.getDesiredConfigByType(ConfigHelper.CLUSTER_ENV);
- Map<String, String> clusterEnvProperties = clusterEnv.getProperties();
-
- // the 3 properties we need to check and update
- Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY,
- ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
- ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
-
- // any updates are stored here and merged into the existing config type
- Map<String, String> updatedProperties = new HashMap<>();
-
- for (String property : properties) {
- // determine if the property exists in the stack being distributed (it
- // kind of has to, but we'll be safe if it's not found)
- String newStackDefaultJson = clusterEnvDefaults.get(property);
- if (StringUtils.isBlank(newStackDefaultJson)) {
- continue;
- }
-
- String existingPropertyJson = clusterEnvProperties.get(property);
-
- // if the stack tools/features property doesn't exist, then just set the
- // one from the new stack
- if (StringUtils.isBlank(existingPropertyJson)) {
- updatedProperties.put(property, newStackDefaultJson);
- continue;
- }
-
- // now is the hard part - we need to check to see if the new stack tools
- // exists alongside the current tools and if it doesn't, then add the new
- // tools in
- final Map<String, Object> existingJson;
- final Map<String, ?> newStackJsonAsObject;
- if (StringUtils.equals(property, ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY)) {
- existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson, Map.class);
- newStackJsonAsObject = gson.<Map<String, String>> fromJson(newStackDefaultJson, Map.class);
- } else {
- existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson,
- Map.class);
-
- newStackJsonAsObject = gson.<Map<String, Map<Object, Object>>> fromJson(newStackDefaultJson,
- Map.class);
- }
-
- if (existingJson.keySet().contains(stackId.getStackName())) {
- continue;
- }
-
- existingJson.put(stackId.getStackName(), newStackJsonAsObject.get(stackId.getStackName()));
-
- String newJson = gson.toJson(existingJson);
- updatedProperties.put(property, newJson);
- }
-
- if (!updatedProperties.isEmpty()) {
- AmbariManagementController amc = getManagementController();
- String serviceNote = String.format(
- "Adding stack tools for %s while distributing a new repository", stackId.toString());
-
- configHelper.updateConfigType(cluster, amc, clusterEnv.getType(), updatedProperties, null,
- amc.getAuthName(), serviceNote);
- }
- }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/48f7fb22/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
index 65cfcaa..b35903b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
@@ -89,6 +89,9 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
protected static final String COMPONENT_UNKNOWN_COUNT_PROPERTY_ID = "ServiceComponentInfo/unknown_count";
protected static final String COMPONENT_INSTALL_FAILED_COUNT_PROPERTY_ID = "ServiceComponentInfo/install_failed_count";
protected static final String COMPONENT_RECOVERY_ENABLED_ID = "ServiceComponentInfo/recovery_enabled";
+ protected static final String COMPONENT_DESIRED_STACK = "ServiceComponentInfo/desired_stack";
+ protected static final String COMPONENT_DESIRED_VERSION = "ServiceComponentInfo/desired_version";
+ protected static final String COMPONENT_REPOSITORY_STATE = "ServiceComponentInfo/repository_state";
private static final String TRUE = "true";
@@ -324,7 +327,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
// Expected
}
- StackId stackId = s.getDesiredStackVersion();
+ StackId stackId = s.getDesiredStackId();
if (!ambariMetaInfo.isValidServiceComponent(stackId.getStackName(),
stackId.getStackVersion(), s.getName(), request.getComponentName())) {
throw new IllegalArgumentException("Unsupported or invalid component"
@@ -349,7 +352,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
Cluster cluster = clusters.getCluster(request.getClusterName());
Service s = cluster.getService(request.getServiceName());
ServiceComponent sc = serviceComponentFactory.createNew(s, request.getComponentName());
- sc.setDesiredStackVersion(s.getDesiredStackVersion());
+ sc.setDesiredRepositoryVersion(s.getDesiredRepositoryVersion());
if (StringUtils.isNotEmpty(request.getDesiredState())) {
State state = State.valueOf(request.getDesiredState());
@@ -367,7 +370,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
sc.setRecoveryEnabled(recoveryEnabled);
LOG.info("Component: {}, recovery_enabled from request: {}", request.getComponentName(), recoveryEnabled);
} else {
- StackId stackId = s.getDesiredStackVersion();
+ StackId stackId = s.getDesiredStackId();
ComponentInfo componentInfo = ambariMetaInfo.getComponent(stackId.getStackName(),
stackId.getStackVersion(), s.getName(), request.getComponentName());
if (componentInfo == null) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/48f7fb22/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
index c8ec08b..aaf4656 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -50,7 +50,6 @@ import org.apache.ambari.server.controller.spi.SystemException;
import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
import org.apache.ambari.server.controller.utilities.PropertyHelper;
import org.apache.ambari.server.orm.dao.HostVersionDAO;
-import org.apache.ambari.server.orm.entities.HostVersionEntity;
import org.apache.ambari.server.security.authorization.AuthorizationException;
import org.apache.ambari.server.security.authorization.AuthorizationHelper;
import org.apache.ambari.server.security.authorization.ResourceType;
@@ -97,10 +96,12 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
= PropertyHelper.getPropertyId("HostRoles", "state");
public static final String HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID
= PropertyHelper.getPropertyId("HostRoles", "desired_state");
- public static final String HOST_COMPONENT_STACK_ID_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "stack_id");
+ public static final String HOST_COMPONENT_VERSION_PROPERTY_ID
+ = PropertyHelper.getPropertyId("HostRoles", "version");
public static final String HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID
= PropertyHelper.getPropertyId("HostRoles", "desired_stack_id");
+ public static final String HOST_COMPONENT_DESIRED_REPOSITORY_VERSION
+ = PropertyHelper.getPropertyId("HostRoles", "desired_repository_version");
public static final String HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID
= PropertyHelper.getPropertyId("HostRoles", "actual_configs");
public static final String HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID
@@ -109,18 +110,16 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
= PropertyHelper.getPropertyId("HostRoles", "desired_admin_state");
public static final String HOST_COMPONENT_MAINTENANCE_STATE_PROPERTY_ID
= "HostRoles/maintenance_state";
- public static final String HOST_COMPONENT_HDP_VERSION_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "hdp_version");
public static final String HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID = "HostRoles/upgrade_state";
//Parameters from the predicate
private static final String QUERY_PARAMETERS_RUN_SMOKE_TEST_ID = "params/run_smoke_test";
private static Set<String> pkPropertyIds =
- new HashSet<String>(Arrays.asList(new String[]{
- HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID,
- HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID,
- HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID,
- HOST_COMPONENT_HOST_NAME_PROPERTY_ID}));
+ new HashSet<>(Arrays.asList(new String[]{
+ HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID,
+ HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID,
+ HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID,
+ HOST_COMPONENT_HOST_NAME_PROPERTY_ID}));
/**
* maintenance state helper
@@ -160,7 +159,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
ResourceAlreadyExistsException,
NoSuchParentResourceException {
- final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
+ final Set<ServiceComponentHostRequest> requests = new HashSet<>();
for (Map<String, Object> propertyMap : request.getProperties()) {
requests.add(changeRequest(propertyMap));
}
@@ -182,7 +181,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
public Set<Resource> getResources(Request request, Predicate predicate)
throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
- final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
+ final Set<ServiceComponentHostRequest> requests = new HashSet<>();
for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
requests.add(getRequest(propertyMap));
@@ -194,7 +193,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
private Set<Resource> getResourcesForUpdate(Request request, Predicate predicate)
throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
- final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
+ final Set<ServiceComponentHostRequest> requests = new HashSet<>();
for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
requests.add(getRequest(propertyMap));
@@ -207,7 +206,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
private Set<Resource> findResources(Request request, final Predicate predicate,
final Set<ServiceComponentHostRequest> requests)
throws SystemException, NoSuchResourceException, NoSuchParentResourceException {
- Set<Resource> resources = new HashSet<Resource>();
+ Set<Resource> resources = new HashSet<>();
Set<String> requestedIds = getRequestPropertyIds(request, predicate);
// We always need host_name for sch
requestedIds.add(HOST_COMPONENT_HOST_NAME_PROPERTY_ID);
@@ -237,8 +236,8 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
response.getLiveState(), requestedIds);
setResourceProperty(resource, HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID,
response.getDesiredState(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_STACK_ID_PROPERTY_ID,
- response.getStackVersion(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_VERSION_PROPERTY_ID, response.getVersion(),
+ requestedIds);
setResourceProperty(resource, HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID,
response.getDesiredStackVersion(), requestedIds);
setResourceProperty(resource, HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID,
@@ -247,15 +246,8 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
response.isStaleConfig(), requestedIds);
setResourceProperty(resource, HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID,
response.getUpgradeState(), requestedIds);
-
- if (requestedIds.contains(HOST_COMPONENT_HDP_VERSION_PROPERTY_ID)) {
- HostVersionEntity versionEntity = hostVersionDAO.
- findByHostAndStateCurrent(response.getClusterName(), response.getHostname());
- if (versionEntity != null) {
- setResourceProperty(resource, HOST_COMPONENT_HDP_VERSION_PROPERTY_ID,
- versionEntity.getRepositoryVersion().getDisplayName(), requestedIds);
- }
- }
+ setResourceProperty(resource, HOST_COMPONENT_DESIRED_REPOSITORY_VERSION,
+ response.getDesiredRepositoryVersion(), requestedIds);
if (response.getAdminState() != null) {
setResourceProperty(resource, HOST_COMPONENT_DESIRED_ADMIN_STATE_PROPERTY_ID,
@@ -322,7 +314,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
if (propertyIds.isEmpty()) {
return propertyIds;
}
- Set<String> unsupportedProperties = new HashSet<String>();
+ Set<String> unsupportedProperties = new HashSet<>();
for (String propertyId : propertyIds) {
if (!propertyId.equals("config")) {
@@ -341,7 +333,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
RequestStageContainer requestStages;
//for (String host : hosts) {
- Map<String, Object> installProperties = new HashMap<String, Object>();
+ Map<String, Object> installProperties = new HashMap<>();
installProperties.put(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, "INSTALLED");
Map<String, String> requestInfo = new HashMap<>();
@@ -402,14 +394,14 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
RequestStageContainer requestStages;
try {
- Map<String, Object> startProperties = new HashMap<String, Object>();
+ Map<String, Object> startProperties = new HashMap<>();
startProperties.put(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, "STARTED");
Request startRequest = PropertyHelper.getUpdateRequest(startProperties, requestInfo);
// Important to query against desired_state as this has been updated when install stage was created
// If I query against state, then the getRequest compares predicate prop against desired_state and then when the predicate
// is later applied explicitly, it gets compared to live_state. Since live_state == INSTALLED == INIT at this point and
// desired_state == INSTALLED, we will always get 0 matches since both comparisons can't be true :(
- Predicate installedStatePredicate = new EqualsPredicate<String>(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, "INSTALLED");
+ Predicate installedStatePredicate = new EqualsPredicate<>(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, "INSTALLED");
Predicate notClientPredicate = new NotPredicate(new ClientComponentPredicate());
Predicate clusterAndClientPredicate = new AndPredicate(clusterPredicate, notClientPredicate);
Predicate hostAndStatePredicate = new AndPredicate(installedStatePredicate, hostPredicate);
@@ -422,10 +414,10 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
} else {
// any INSTALL_ONLY components should not be started
List<Predicate> listOfComponentPredicates =
- new ArrayList<Predicate>();
+ new ArrayList<>();
for (String installOnlyComponent : installOnlyComponents) {
- Predicate componentNameEquals = new EqualsPredicate<String>(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, installOnlyComponent);
+ Predicate componentNameEquals = new EqualsPredicate<>(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, installOnlyComponent);
// create predicate to filter out the install only component
listOfComponentPredicates.add(new NotPredicate(componentNameEquals));
}
@@ -482,11 +474,11 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
Clusters clusters = getManagementController().getClusters();
- Map<String, Map<State, List<ServiceComponentHost>>> changedScHosts = new HashMap<String, Map<State, List<ServiceComponentHost>>>();
- Collection<ServiceComponentHost> ignoredScHosts = new ArrayList<ServiceComponentHost>();
- Set<String> clusterNames = new HashSet<String>();
- Map<String, Map<String, Map<String, Set<String>>>> requestClusters = new HashMap<String, Map<String, Map<String, Set<String>>>>();
- Map<ServiceComponentHost, State> directTransitionScHosts = new HashMap<ServiceComponentHost, State>();
+ Map<String, Map<State, List<ServiceComponentHost>>> changedScHosts = new HashMap<>();
+ Collection<ServiceComponentHost> ignoredScHosts = new ArrayList<>();
+ Set<String> clusterNames = new HashSet<>();
+ Map<String, Map<String, Map<String, Set<String>>>> requestClusters = new HashMap<>();
+ Map<ServiceComponentHost, State> directTransitionScHosts = new HashMap<>();
Resource.Type reqOpLvl = determineOperationLevel(requestProperties);
@@ -529,19 +521,19 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
// maps of cluster->services, services->components, components->hosts
Map<String, Map<String, Set<String>>> clusterServices = requestClusters.get(request.getClusterName());
if (clusterServices == null) {
- clusterServices = new HashMap<String, Map<String, Set<String>>>();
+ clusterServices = new HashMap<>();
requestClusters.put(request.getClusterName(), clusterServices);
}
Map<String, Set<String>> serviceComponents = clusterServices.get(request.getServiceName());
if (serviceComponents == null) {
- serviceComponents = new HashMap<String, Set<String>>();
+ serviceComponents = new HashMap<>();
clusterServices.put(request.getServiceName(), serviceComponents);
}
Set<String> componentHosts = serviceComponents.get(request.getComponentName());
if (componentHosts == null) {
- componentHosts = new HashSet<String>();
+ componentHosts = new HashSet<>();
serviceComponents.put(request.getComponentName(), componentHosts) ;
}
@@ -562,7 +554,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
// throw exception if desired state isn't a valid desired state (static check)
if (!newState.isValidDesiredState()) {
throw new IllegalArgumentException("Invalid arguments, invalid"
- + " desired state, desiredState=" + newState.toString());
+ + " desired state, desiredState=" + newState);
}
}
@@ -684,7 +676,6 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
(String) properties.get(HOST_COMPONENT_HOST_NAME_PROPERTY_ID),
(String) properties.get(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID));
serviceComponentHostRequest.setState((String) properties.get(HOST_COMPONENT_STATE_PROPERTY_ID));
- serviceComponentHostRequest.setDesiredStackId((String) properties.get(HOST_COMPONENT_STACK_ID_PROPERTY_ID));
if (properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID) != null) {
serviceComponentHostRequest.setStaleConfig(
properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID).toString().toLowerCase());
@@ -724,8 +715,6 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
if (properties.get(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID) != null) {
serviceComponentHostRequest.setDesiredState((String)properties.get(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID));
}
- serviceComponentHostRequest.setDesiredStackId(
- (String) properties.get(HOST_COMPONENT_STACK_ID_PROPERTY_ID));
if (properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID) != null) {
serviceComponentHostRequest.setStaleConfig(
properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID).toString().toLowerCase());
@@ -764,7 +753,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
NoSuchResourceException,
NoSuchParentResourceException {
- final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
+ final Set<ServiceComponentHostRequest> requests = new HashSet<>();
final boolean runSmokeTest = "true".equals(getQueryParameterValue(
QUERY_PARAMETERS_RUN_SMOKE_TEST_ID, predicate));
@@ -782,7 +771,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
//todo: and then this predicate evaluation should always be performed and the
//todo: temporary performQueryEvaluation flag hack should be removed.
if (! performQueryEvaluation || predicate.evaluate(queryResource)) {
- Map<String, Object> updateRequestProperties = new HashMap<String, Object>();
+ Map<String, Object> updateRequestProperties = new HashMap<>();
// add props from query resource
updateRequestProperties.putAll(PropertyHelper.getProperties(queryResource));