You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by al...@apache.org on 2015/12/02 19:45:48 UTC
ambari git commit: AMBARI-14121. Express Upgrade authors configs as
_anonymous instead of auth user (alejandro)
Repository: ambari
Updated Branches:
refs/heads/trunk 4b57a0f8a -> e9ae39cb6
AMBARI-14121. Express Upgrade authors configs as _anonymous instead of auth user (alejandro)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e9ae39cb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e9ae39cb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e9ae39cb
Branch: refs/heads/trunk
Commit: e9ae39cb6b33024c9829c09402f0bc1bdb03a70c
Parents: 4b57a0f
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Mon Nov 30 16:00:09 2015 -0800
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Wed Dec 2 10:38:28 2015 -0800
----------------------------------------------------------------------
.../internal/UpgradeResourceProvider.java | 14 +-
.../upgrades/UpdateDesiredStackAction.java | 51 +-
.../AmbariManagementControllerTest.java | 2 +-
.../internal/UpgradeResourceProviderTest.java | 2 +-
.../upgrades/UpgradeActionTest.java | 145 ++-
.../HDP/2.1.1/upgrades/config-upgrade.xml | 9 +
.../upgrades/upgrade_nonrolling_new_stack.xml | 974 +++++++++++++++++++
7 files changed, 1162 insertions(+), 35 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/e9ae39cb/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 748dbbe..2b53ab5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -672,6 +672,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
Cluster cluster = getManagementController().getClusters().getCluster(clusterName);
ConfigHelper configHelper = getManagementController().getConfigHelper();
+ String userName = getManagementController().getAuthName();
// the version being upgraded or downgraded to (ie 2.2.1.0-1234)
final String version = (String) requestMap.get(UPGRADE_VERSION);
@@ -765,7 +766,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
if (pack.getType() == UpgradeType.ROLLING) {
// Desired configs must be set before creating stages because the config tag
// names are read and set on the command for filling in later
- applyStackAndProcessConfigurations(targetStackId.getStackName(), cluster, version, direction, pack);
+ applyStackAndProcessConfigurations(targetStackId.getStackName(), cluster, version, direction, pack, userName);
}
// Resolve or build a proper config upgrade pack
@@ -903,9 +904,11 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
* @param upgradePack
* upgrade pack used for upgrade or downgrade. This is needed to determine
* which services are affected.
+ * @param userName
+ * username performing the action
* @throws AmbariException
*/
- public void applyStackAndProcessConfigurations(String stackName, Cluster cluster, String version, Direction direction, UpgradePack upgradePack)
+ public void applyStackAndProcessConfigurations(String stackName, Cluster cluster, String version, Direction direction, UpgradePack upgradePack, String userName)
throws AmbariException {
RepositoryVersionEntity targetRve = s_repoVersionDAO.findByStackNameAndVersion(stackName, version);
if (null == targetRve) {
@@ -913,6 +916,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
return;
}
+ if (null == userName) {
+ userName = getManagementController().getAuthName();
+ }
+
// if the current and target stacks are the same (ie HDP 2.2.0.0 -> 2.2.1.0)
// then we should never do anything with configs on either upgrade or
// downgrade; however if we are going across stacks, we have to do the stack
@@ -937,7 +944,6 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
Map<String, Map<String, String>> newConfigurationsByType = null;
ConfigHelper configHelper = getManagementController().getConfigHelper();
- // TODO AMBARI-12698, handle jumping across several stacks
if (direction == Direction.UPGRADE) {
// populate a map of default configurations for the old stack (this is
// used when determining if a property has been customized and should be
@@ -1074,7 +1080,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
// !!! configs must be created after setting the stack version
if (null != newConfigurationsByType) {
configHelper.createConfigTypes(cluster, getManagementController(), newConfigurationsByType,
- getManagementController().getAuthName(), "Configuration created for Upgrade");
+ userName, "Configuration created for Upgrade");
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/e9ae39cb/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
index 44b13de..3d304ea 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
@@ -27,9 +27,11 @@ import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.agent.CommandReport;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
import org.apache.ambari.server.controller.AmbariServer;
import org.apache.ambari.server.controller.internal.UpgradeResourceProvider;
import org.apache.ambari.server.serveraction.AbstractServerAction;
+import org.apache.ambari.server.serveraction.ServerAction;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.StackId;
@@ -38,6 +40,8 @@ import org.apache.ambari.server.state.stack.UpgradePack;
import org.apache.ambari.server.state.stack.upgrade.Direction;
import com.google.inject.Inject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Action that represents updating the Desired Stack Id during the middle of a stack upgrade (typically NonRolling).
@@ -47,24 +51,29 @@ import com.google.inject.Inject;
*/
public class UpdateDesiredStackAction extends AbstractServerAction {
- private static final String COMMAND_PARAM_VERSION = VERSION;
- private static final String COMMAND_DOWNGRADE_FROM_VERSION = "downgrade_from_version";
- private static final String COMMAND_PARAM_DIRECTION = "upgrade_direction";
- private static final String COMMAND_PARAM_UPGRADE_PACK = "upgrade_pack";
+ /**
+ * Logger.
+ */
+ private static final Logger LOG = LoggerFactory.getLogger(UpdateDesiredStackAction.class);
+
+ public static final String COMMAND_PARAM_VERSION = VERSION;
+ public static final String COMMAND_DOWNGRADE_FROM_VERSION = "downgrade_from_version";
+ public static final String COMMAND_PARAM_DIRECTION = "upgrade_direction";
+ public static final String COMMAND_PARAM_UPGRADE_PACK = "upgrade_pack";
/**
* The original "current" stack of the cluster before the upgrade started.
* This is the same regardless of whether the current direction is
* {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
*/
- private static final String COMMAND_PARAM_ORIGINAL_STACK = "original_stack";
+ public static final String COMMAND_PARAM_ORIGINAL_STACK = "original_stack";
/**
* The target upgrade stack before the upgrade started. This is the same
* regardless of whether the current direction is {@link Direction#UPGRADE} or
* {@link Direction#DOWNGRADE}.
*/
- private static final String COMMAND_PARAM_TARGET_STACK = "target_stack";
+ public static final String COMMAND_PARAM_TARGET_STACK = "target_stack";
/**
* The Cluster that this ServerAction implementation is executing on.
@@ -75,6 +84,12 @@ public class UpdateDesiredStackAction extends AbstractServerAction {
@Inject
private AmbariMetaInfo ambariMetaInfo;
+ /**
+ * The Ambari configuration.
+ */
+ @Inject
+ private Configuration m_configuration;
+
@Override
public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
throws AmbariException, InterruptedException {
@@ -92,20 +107,34 @@ public class UpdateDesiredStackAction extends AbstractServerAction {
String clusterName = getExecutionCommand().getClusterName();
UpgradePack upgradePack = ambariMetaInfo.getUpgradePacks(originalStackId.getStackName(), originalStackId.getStackVersion()).get(upgradePackName);
- return updateDesiredStack(clusterName, originalStackId, targetStackId, version, direction, upgradePack);
+ Map<String, String> roleParams = getExecutionCommand().getRoleParams();
+
+ // Make a best attempt at setting the username
+ String userName;
+ if (roleParams != null && roleParams.containsKey(ServerAction.ACTION_USER_NAME)) {
+ userName = roleParams.get(ServerAction.ACTION_USER_NAME);
+ } else {
+ userName = m_configuration.getAnonymousAuditName();
+ LOG.warn(String.format("Did not receive role parameter %s, will save configs using anonymous username %s", ServerAction.ACTION_USER_NAME, userName));
+ }
+
+ return updateDesiredStack(clusterName, originalStackId, targetStackId, version, direction, upgradePack, userName);
}
/**
* Set the cluster's Desired Stack Id during an upgrade.
*
* @param clusterName the name of the cluster the action is meant for
- * @paran originalStackId the stack Id of the cluster before the upgrade.
- * @paran targetStackId the stack Id that was desired for this upgrade.
+ * @param originalStackId the stack Id of the cluster before the upgrade.
+ * @param targetStackId the stack Id that was desired for this upgrade.
+ * @param direction direction, either upgrade or downgrade
+ * @param upgradePack Upgrade Pack to use
+ * @param userName username performing the action
* @return the command report to return
*/
private CommandReport updateDesiredStack(
String clusterName, StackId originalStackId, StackId targetStackId,
- String version, Direction direction, UpgradePack upgradePack)
+ String version, Direction direction, UpgradePack upgradePack, String userName)
throws AmbariException, InterruptedException {
StringBuilder out = new StringBuilder();
StringBuilder err = new StringBuilder();
@@ -148,7 +177,7 @@ public class UpdateDesiredStackAction extends AbstractServerAction {
// Create new configurations that are a merge between the current stack and the desired stack
// Also updates the desired stack version.
UpgradeResourceProvider upgradeResourceProvider = new UpgradeResourceProvider(AmbariServer.getController());
- upgradeResourceProvider.applyStackAndProcessConfigurations(targetStackId.getStackName(), cluster, version, direction, upgradePack);
+ upgradeResourceProvider.applyStackAndProcessConfigurations(targetStackId.getStackName(), cluster, version, direction, upgradePack, userName);
String message = String.format("Success! Set cluster's %s Desired Stack Id to %s.\n", clusterName, targetStackId.getStackId());
out.append(message);
http://git-wip-us.apache.org/repos/asf/ambari/blob/e9ae39cb/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 3bf6cad..05d5aff 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -7240,7 +7240,7 @@ public class AmbariManagementControllerTest {
Assert.assertEquals(1, responsesWithParams.size());
StackVersionResponse resp = responsesWithParams.iterator().next();
assertNotNull(resp.getUpgradePacks());
- assertEquals(7, resp.getUpgradePacks().size());
+ assertEquals(8, resp.getUpgradePacks().size());
assertTrue(resp.getUpgradePacks().contains("upgrade_test"));
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/e9ae39cb/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
index 93360bd..6a53076 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
@@ -1021,7 +1021,7 @@ public class UpgradeResourceProviderTest {
Map<String, UpgradePack> upgradePacks = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1");
UpgradePack upgrade = upgradePacks.get("upgrade_to_new_stack");
- upgradeResourceProvider.applyStackAndProcessConfigurations(stack211.getStackName(), cluster, "2.2.0.0", Direction.UPGRADE, upgrade);
+ upgradeResourceProvider.applyStackAndProcessConfigurations(stack211.getStackName(), cluster, "2.2.0.0", Direction.UPGRADE, upgrade, "admin");
Map<String, Map<String, String>> expectedConfigurations = expectedConfigurationsCapture.getValue();
Map<String, String> expectedFooType = expectedConfigurations.get("foo-site");
http://git-wip-us.apache.org/repos/asf/ambari/blob/e9ae39cb/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index 38370c7..520959c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
+import java.lang.reflect.Field;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -39,6 +40,9 @@ import org.apache.ambari.server.agent.CommandReport;
import org.apache.ambari.server.agent.ExecutionCommand;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.AmbariServer;
+import org.apache.ambari.server.controller.ServiceConfigVersionResponse;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.OrmTestHelper;
@@ -51,6 +55,7 @@ import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
import org.apache.ambari.server.orm.entities.HostVersionEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.serveraction.ServerAction;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
@@ -66,6 +71,8 @@ import org.apache.ambari.server.state.ServiceComponentHostFactory;
import org.apache.ambari.server.state.ServiceFactory;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.State;
+import org.apache.ambari.server.state.stack.UpgradePack;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -88,7 +95,7 @@ public class UpgradeActionTest {
private static final String HDP_2_1_1_0 = "2.1.1.0-1";
private static final String HDP_2_1_1_1 = "2.1.1.1-2";
- private static final String HDP_2_2_1_0 = "2.2.0.1-3";
+ private static final String HDP_2_2_0_1 = "2.2.0.1-3";
private static final StackId HDP_21_STACK = new StackId("HDP-2.1.1");
private static final StackId HDP_22_STACK = new StackId("HDP-2.2.0");
@@ -97,6 +104,10 @@ public class UpgradeActionTest {
private Injector m_injector;
+ private AmbariManagementController amc;
+
+ private AmbariMetaInfo ambariMetaInfo;
+
@Inject
private OrmTestHelper m_helper;
@@ -104,6 +115,9 @@ public class UpgradeActionTest {
private RepositoryVersionDAO repoVersionDAO;
@Inject
+ private Clusters clusters;
+
+ @Inject
private ClusterVersionDAO clusterVersionDAO;
@Inject
@@ -130,6 +144,14 @@ public class UpgradeActionTest {
m_injector.getInstance(GuiceJpaInitializer.class);
m_injector.injectMembers(this);
m_injector.getInstance(UnitOfWork.class).begin();
+
+ // Initialize AmbariManagementController
+ amc = m_injector.getInstance(AmbariManagementController.class);
+ ambariMetaInfo = m_injector.getInstance(AmbariMetaInfo.class);
+
+ Field field = AmbariServer.class.getDeclaredField("clusterController");
+ field.setAccessible(true);
+ field.set(null, amc);
}
@After
@@ -142,7 +164,6 @@ public class UpgradeActionTest {
String clusterName = "c1";
String hostName = "h1";
- Clusters clusters = m_injector.getInstance(Clusters.class);
clusters.addCluster(clusterName, sourceStack);
Cluster c = clusters.getCluster(clusterName);
@@ -309,6 +330,94 @@ public class UpgradeActionTest {
hostVersionDAO.create(entity);
}
+ /***
+ * During an Express Upgrade that crosses a stack version, Ambari calls UpdateDesiredStackAction
+ * in order to change the stack and apply configs.
+ * The configs that are applied must be saved with the username that is passed in the role params.
+ */
+ @Test
+ public void testExpressUpgradeUpdateDesiredStackAction() throws Exception {
+ StackId sourceStack = HDP_21_STACK;
+ StackId targetStack = HDP_22_STACK;
+ String sourceRepo = HDP_2_1_1_0;
+ String targetRepo = HDP_2_2_0_1;
+
+ // Must be a NON_ROLLING upgrade that jumps stacks in order for it to apply config changes.
+ // That upgrade pack has changes for ZK and NameNode.
+ String upgradePackName = "upgrade_nonrolling_new_stack";
+
+ AmbariMetaInfo metaInfo = m_injector.getInstance(AmbariMetaInfo.class);
+
+ Map<String, UpgradePack> packs = metaInfo.getUpgradePacks(sourceStack.getStackName(), sourceStack.getStackVersion());
+ Assert.assertTrue(packs.containsKey(upgradePackName));
+
+ makeCrossStackUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
+
+ RepositoryVersionEntity targetRve = repoVersionDAO.findByStackNameAndVersion("HDP", targetRepo);
+ Assert.assertNotNull(targetRve);
+
+ Cluster cluster = clusters.getCluster("c1");
+
+ // Install ZK and HDFS with some components
+ Service zk = installService(cluster, "ZOOKEEPER");
+ addServiceComponent(cluster, zk, "ZOOKEEPER_SERVER");
+ addServiceComponent(cluster, zk, "ZOOKEEPER_CLIENT");
+ createNewServiceComponentHost(cluster, "ZOOKEEPER", "ZOOKEEPER_SERVER", "h1");
+ createNewServiceComponentHost(cluster, "ZOOKEEPER", "ZOOKEEPER_CLIENT", "h1");
+
+ Service hdfs = installService(cluster, "HDFS");
+ addServiceComponent(cluster, hdfs, "NAMENODE");
+ addServiceComponent(cluster, hdfs, "DATANODE");
+ createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
+ createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
+
+ // Create some configs
+ createConfigs(cluster);
+ Collection<Config> configs = cluster.getAllConfigs();
+ Assert.assertFalse(configs.isEmpty());
+
+ Map<String, String> commandParams = new HashMap<String, String>();
+ commandParams.put(UpdateDesiredStackAction.COMMAND_PARAM_ORIGINAL_STACK, sourceStack.getStackId());
+ commandParams.put(UpdateDesiredStackAction.COMMAND_PARAM_TARGET_STACK, targetStack.getStackId());
+ commandParams.put(UpdateDesiredStackAction.COMMAND_PARAM_DIRECTION, Direction.UPGRADE.toString());
+ commandParams.put(UpdateDesiredStackAction.COMMAND_PARAM_VERSION, targetRepo);
+ commandParams.put(UpdateDesiredStackAction.COMMAND_PARAM_UPGRADE_PACK, upgradePackName);
+
+ ExecutionCommand executionCommand = new ExecutionCommand();
+ executionCommand.setCommandParams(commandParams);
+ Map<String, String> roleParams = new HashMap<>();
+
+ // User that is performing the config changes
+ String userName = "admin";
+ roleParams.put(ServerAction.ACTION_USER_NAME, userName);
+ executionCommand.setRoleParams(roleParams);
+ executionCommand.setClusterName("c1");
+
+ HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
+ hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
+
+ // Call the action to change the desired stack and apply the configs from the Config Pack called by the Upgrade Pack.
+ UpdateDesiredStackAction action = m_injector.getInstance(UpdateDesiredStackAction.class);
+ action.setExecutionCommand(executionCommand);
+ action.setHostRoleCommand(hostRoleCommand);
+
+ CommandReport report = action.execute(null);
+ assertNotNull(report);
+ assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
+
+ List<ServiceConfigVersionResponse> configVersionsAfter = cluster.getServiceConfigVersions();
+ Assert.assertFalse(configVersionsAfter.isEmpty());
+ boolean atLeastOneCreated = false;
+ for (ServiceConfigVersionResponse configResponse : configVersionsAfter) {
+ if (configResponse.getIsCurrent() && configResponse.getVersion() > 1L && configResponse.getUserName().equals(userName)) {
+ atLeastOneCreated = true;
+ break;
+ }
+ }
+ // The user should have created at least one version.
+ Assert.assertTrue(atLeastOneCreated);
+ }
+
@Test
public void testFinalizeDowngrade() throws Exception {
StackId sourceStack = HDP_21_STACK;
@@ -366,7 +475,6 @@ public class UpgradeActionTest {
// Verify the repo before calling Finalize
AmbariMetaInfo metaInfo = m_injector.getInstance(AmbariMetaInfo.class);
AmbariCustomCommandExecutionHelper helper = m_injector.getInstance(AmbariCustomCommandExecutionHelper.class);
- Clusters clusters = m_injector.getInstance(Clusters.class);
Host host = clusters.getHost("h1");
Cluster cluster = clusters.getCluster("c1");
@@ -430,7 +538,6 @@ public class UpgradeActionTest {
// Verify the repo before calling Finalize
AmbariMetaInfo metaInfo = m_injector.getInstance(AmbariMetaInfo.class);
AmbariCustomCommandExecutionHelper helper = m_injector.getInstance(AmbariCustomCommandExecutionHelper.class);
- Clusters clusters = m_injector.getInstance(Clusters.class);
Host host = clusters.getHost("h1");
Cluster cluster = clusters.getCluster("c1");
@@ -478,11 +585,10 @@ public class UpgradeActionTest {
StackId sourceStack = HDP_21_STACK;
StackId targetStack = HDP_22_STACK;
String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_2_1_0;
+ String targetRepo = HDP_2_2_0_1;
makeCrossStackUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
- Clusters clusters = m_injector.getInstance(Clusters.class);
Cluster cluster = clusters.getCluster("c1");
// setup the cluster for the upgrade across stacks
@@ -531,11 +637,9 @@ public class UpgradeActionTest {
StackId sourceStack = HDP_21_STACK;
StackId targetStack = HDP_22_STACK;
String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_2_1_0;
+ String targetRepo = HDP_2_2_0_1;
makeCrossStackUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
-
- Clusters clusters = m_injector.getInstance(Clusters.class);
Cluster cluster = clusters.getCluster("c1");
// install HDFS with some components
@@ -559,7 +663,7 @@ public class UpgradeActionTest {
// verify we have configs in both HDP stacks
cluster = clusters.getCluster("c1");
Collection<Config> configs = cluster.getAllConfigs();
- assertEquals(6, configs.size());
+ assertEquals(8, configs.size());
Map<String, String> commandParams = new HashMap<String, String>();
commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "downgrade");
@@ -602,7 +706,7 @@ public class UpgradeActionTest {
// verify we have configs in only 1 stack
cluster = clusters.getCluster("c1");
configs = cluster.getAllConfigs();
- assertEquals(3, configs.size());
+ assertEquals(4, configs.size());
hosts = dao.findByClusterStackAndVersion("c1", targetStack, targetRepo);
assertFalse(hosts.isEmpty());
@@ -623,11 +727,10 @@ public class UpgradeActionTest {
StackId sourceStack = HDP_21_STACK;
StackId targetStack = HDP_22_STACK;
String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_2_1_0;
+ String targetRepo = HDP_2_2_0_1;
makeCrossStackUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
- Clusters clusters = m_injector.getInstance(Clusters.class);
Cluster cluster = clusters.getCluster("c1");
Service service = installService(cluster, "HDFS");
@@ -752,18 +855,24 @@ public class UpgradeActionTest {
properties.put("a", "a1");
properties.put("b", "b1");
- Config c1 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, m_injector);
- properties.put("c", "c1");
- properties.put("d", "d1");
+ Config c1 = new ConfigImpl(cluster, "zookeeper-env", properties, propertiesAttributes, m_injector);
+ properties.put("zookeeper_a", "value_1");
+ properties.put("zookeeper_b", "value_2");
+
+ Config c2 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, m_injector);
+ properties.put("hdfs_a", "value_3");
+ properties.put("hdfs_b", "value_4");
- Config c2 = new ConfigImpl(cluster, "core-site", properties, propertiesAttributes, m_injector);
- Config c3 = new ConfigImpl(cluster, "foo-site", properties, propertiesAttributes, m_injector);
+ Config c3 = new ConfigImpl(cluster, "core-site", properties, propertiesAttributes, m_injector);
+ Config c4 = new ConfigImpl(cluster, "foo-site", properties, propertiesAttributes, m_injector);
cluster.addConfig(c1);
cluster.addConfig(c2);
cluster.addConfig(c3);
+ cluster.addConfig(c4);
c1.persist();
c2.persist();
c3.persist();
+ c4.persist();
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/e9ae39cb/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/config-upgrade.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/config-upgrade.xml
index 6a2986e..f345d12 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/config-upgrade.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/config-upgrade.xml
@@ -24,6 +24,11 @@
<changes>
<definition xsi:type="configure" id="hdp_2_1_1_zk_post_upgrade">
</definition>
+
+ <definition xsi:type="configure" id="hdp_2_1_1_zookeeper_new_config_type">
+ <type>zookeeper-newconfig</type>
+ <set key="fooKey" value="fooValue"/>
+ </definition>
</changes>
</component>
</service>
@@ -40,6 +45,10 @@
<key>myproperty</key>
<value>mynewvalue</value>
</definition>
+ <definition xsi:type="configure" id="hdp_2_1_1_hdfs_new_config_type">
+ <type>hdfs-newconfig</type>
+ <set key="fooKey" value="fooValue"/>
+ </definition>
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/e9ae39cb/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_nonrolling_new_stack.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_nonrolling_new_stack.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_nonrolling_new_stack.xml
new file mode 100644
index 0000000..ad6174c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_nonrolling_new_stack.xml
@@ -0,0 +1,974 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <target>2.2.*.*</target>
+ <target-stack>HDP-2.2.0.1</target-stack>
+ <type>NON_ROLLING</type>
+
+ <prerequisite-checks>
+ <configuration>
+ <!-- Configuration properties for all pre-reqs including required pre-reqs -->
+ <check-properties name="org.apache.ambari.server.checks.HiveDynamicServiceDiscoveryCheck">
+ <property name="min-failure-stack-version">HDP-2.3.0.0</property>
+ </check-properties>
+ </configuration>
+ </prerequisite-checks>
+
+ <order>
+ <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
+ <direction>UPGRADE</direction>
+
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+
+ <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+ <task xsi:type="manual">
+ <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+ </task>
+ </execute-stage>
+
+
+ <execute-stage service="STORM" component="NIMBUS" title="Stop Storm Topologies">
+ <task xsi:type="manual">
+ <message>Before continuing, please deactivate and kill any currently running topologies.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="TEZ" component="TEZ_CLIENT" title="Check Tez Tarball">
+ <task xsi:type="execute" hosts="any">
+ <script>scripts/pre_upgrade.py</script>
+ <function>prepare</function>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="stop" name="STOP_HIGH_LEVEL_SERVICE_COMPONENTS" title="Stop Components for High-Level Services">
+ <direction>UPGRADE</direction>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <service-check>false</service-check>
+ <parallel-scheduler/>
+
+ <service name="FLUME">
+ <component>FLUME_HANDLER</component>
+ </service>
+
+ <service name="ACCUMULO">
+ <component>ACCUMULO_TRACER</component>
+ <component>ACCUMULO_GC</component>
+ <component>ACCUMULO_TSERVER</component>
+ <component>ACCUMULO_MONITOR</component>
+ <component>ACCUMULO_MASTER</component>
+ </service>
+
+ <service name="STORM">
+ <component>DRPC_SERVER</component>
+ <component>STORM_UI_SERVER</component>
+ <component>SUPERVISOR</component>
+ <component>NIMBUS</component>
+ </service>
+
+ <service name="KNOX">
+ <component>KNOX_GATEWAY</component>
+ </service>
+
+ <service name="KAFKA">
+ <component>KAFKA_BROKER</component>
+ </service>
+
+ <service name="FALCON">
+ <component>FALCON_SERVER</component>
+ </service>
+
+ <service name="OOZIE">
+ <component>OOZIE_SERVER</component>
+ </service>
+
+ <service name="SPARK">
+ <component>SPARK_JOBHISTORYSERVER</component>
+ </service>
+
+ <service name="HIVE">
+ <component>WEBHCAT_SERVER</component>
+ <component>HIVE_SERVER</component>
+ <component>HIVE_METASTORE</component>
+ </service>
+
+ <service name="YARN">
+ <component>NODEMANAGER</component>
+ <component>RESOURCEMANAGER</component>
+ <component>APP_TIMELINE_SERVER</component>
+ </service>
+
+ <service name="MAPREDUCE2">
+ <component>HISTORYSERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="cluster" name="Backups" title="Perform Backups">
+ <direction>UPGRADE</direction>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+
+ <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Backup Oozie Database">
+ <task xsi:type="manual">
+ <message>Before continuing, please backup the Oozie Server database on {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="HIVE" component="HIVE_METASTORE" title="Backup Hive Metastore">
+ <task xsi:type="manual">
+ <message>Before continuing, please backup the Hive Metastore database referenced by the Hive Metastore service(s) located on the following host(s): {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Backup Knox Data">
+ <task xsi:type="manual">
+ <message>Before continuing, please backup the Knox data. E.g., "cp -RL /var/lib/knox/data/* ~/knox_backup/" on the following host(s): {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBASE">
+ <task xsi:type="execute" hosts="master">
+ <script>scripts/hbase_upgrade.py</script>
+ <function>take_snapshot</function>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="HDFS" component="NAMENODE" title="Prepare HDFS">
+ <task xsi:type="execute" hosts="master">
+ <script>scripts/namenode.py</script>
+ <function>prepare_express_upgrade</function>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup Ranger Database">
+ <task xsi:type="manual">
+ <message>Before continuing, please backup the Ranger Admin database and Ranger Audit database on the following host(s): {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="stop" name="STOP_LOW_LEVEL_SERVICE_COMPONENTS" title="Stop Components for Core Services">
+ <direction>UPGRADE</direction>
+ <skippable>true</skippable>
+ <service-check>false</service-check>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+
+ <service name="HBASE">
+ <component>HBASE_REGIONSERVER</component>
+ <component>HBASE_MASTER</component>
+ </service>
+
+ <service name="HDFS">
+ <component>DATANODE</component>
+ <component>NAMENODE</component>
+ <component>SECONDARY_NAMENODE</component>
+ <component>ZKFC</component>
+ <component>JOURNALNODE</component>
+ </service>
+
+ <service name="RANGER">
+ <component>RANGER_USERSYNC</component>
+ <component>RANGER_ADMIN</component>
+ </service>
+
+ <service name="ZOOKEEPER">
+ <component>ZOOKEEPER_SERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="cluster" name="Restore Backups" title="Restore Backups">
+ <direction>DOWNGRADE</direction>
+ <skippable>true</skippable>
+
+ <!-- If the user attempts a downgrade after this point, they will need to restore backups
+ before starting any of the services. -->
+
+ <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Restore Oozie Database">
+ <task xsi:type="manual">
+ <message>Before continuing, please restore the Oozie Server database on {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="HIVE" component="HIVE_METASTORE" title="Restore Hive Metastore">
+ <task xsi:type="manual">
+ <message>Before continuing, please restore the Hive Metastore database located on the following host(s): {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Restore Knox Data">
+ <task xsi:type="manual">
+ <message>Before continuing, please restore the Knox data. E.g., "cp -RL ~/knox_backup/* /var/lib/knox/data/" on the following host(s): {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Restore Ranger Database">
+ <task xsi:type="manual">
+ <message>Before continuing, please restore the Ranger Admin database and Ranger Audit database on the following host(s): {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="cluster" name="RESTORE_CONFIG_DIRS" title="Restore Configuration Directories">
+ <direction>DOWNGRADE</direction>
+ <execute-stage title="Restore configuration directories and remove HDP 2.2 symlinks">
+ <task xsi:type="execute">
+ <script>scripts/ru_set_all.py</script>
+ <function>unlink_all_configs</function>
+ </task>
+ </execute-stage>
+ </group>
+
+ <!-- After processing this group, will change the effective Stack of the UpgradeContext object. -->
+ <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" title="Update Target Stack">
+ <execute-stage title="Update Target Stack" service="" component="">
+ <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.UpdateDesiredStackAction">
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="cluster" name="Upgrade service configs" title="Upgrade service configs">
+ <direction>UPGRADE</direction> <!-- prevent config changes on downgrade -->
+ <skippable>true</skippable> <!-- May fix configuration problems manually -->
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+
+ <execute-stage service="ZOOKEEPER" component="ZOOKEEPER_SERVER" title="Apply config changes for Zookeeper">
+ <task xsi:type="configure" id="hdp_2_1_1_zk_post_upgrade"/>
+ </execute-stage>
+
+ <execute-stage service="ZOOKEEPER" component="ZOOKEEPER_SERVER" title="Apply config changes for Zookeeper">
+ <task xsi:type="configure" id="hdp_2_1_1_zookeeper_new_config_type"/>
+ </execute-stage>
+
+ <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for NameNode">
+ <task xsi:type="configure" id="hdp_2_1_1_nn_pre_upgrade"/>
+ </execute-stage>
+
+ <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for NameNode">
+ <task xsi:type="configure" id="hdp_2_1_1_nn_test"/>
+ </execute-stage>
+
+ <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for NameNode">
+ <task xsi:type="configure" id="hdp_2_1_1_hdfs_new_config_type"/>
+ </execute-stage>
+
+ <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for NodeManager">
+ <task xsi:type="configure" id="hdp_2_1_1_nm_pre_upgrade"/>
+ </execute-stage>
+ </group>
+
+ <!-- Now, restart all of the services. -->
+ <group xsi:type="restart" name="ZOOKEEPER" title="ZooKeeper">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="ZOOKEEPER">
+ <service-check>false</service-check>
+ <component>ZOOKEEPER_SERVER</component>
+ <component>ZOOKEEPER_CLIENT</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="RANGER" title="Ranger">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="RANGER">
+ <component>RANGER_ADMIN</component>
+ <component>RANGER_USERSYNC</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="HDFS" title="HDFS">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="HDFS">
+ <component>JOURNALNODE</component>
+ <component>ZKFC</component>
+ <component>NAMENODE</component>
+ <component>SECONDARY_NAMENODE</component>
+ <component>HDFS_CLIENT</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="HDFS_DATANODES" title="HDFS DataNodes">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="HDFS">
+ <component>DATANODE</component>
+ </service>
+ </group>
+
+ <group xsi:type="cluster" name="HDFS_LEAVE_SAFEMODE" title="HDFS - Wait to leave Safemode">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <direction>UPGRADE</direction>
+
+ <execute-stage service="HDFS" component="NAMENODE" title="Wait to leave Safemode">
+ <task xsi:type="execute" hosts="all" summary="Wait for NameNode to leave Safemode">
+ <script>scripts/namenode.py</script>
+ <function>wait_for_safemode_off</function>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="restart" name="YARN_AND_MAPR" title="YARN and MapReduce2">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+
+ <service name="MAPREDUCE2">
+ <component>HISTORYSERVER</component>
+ <component>MAPREDUCE2_CLIENT</component>
+ </service>
+
+ <service name="YARN">
+ <component>APP_TIMELINE_SERVER</component>
+ <component>RESOURCEMANAGER</component>
+ <component>YARN_CLIENT</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="YARN_NODEMANAGERS" title="YARN NodeManagers">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+
+ <service name="YARN">
+ <component>NODEMANAGER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="HBASE" title="HBASE">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="HBASE">
+ <component>HBASE_MASTER</component>
+ <component>HBASE_REGIONSERVER</component>
+ <component>HBASE_CLIENT</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="CLIENTS" title="Tez, Pig, Sqoop Clients">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="TEZ">
+ <component>TEZ_CLIENT</component>
+ </service>
+
+ <service name="PIG">
+ <component>PIG</component>
+ </service>
+
+ <service name="SQOOP">
+ <component>SQOOP</component>
+ </service>
+ </group>
+
+ <group name="SERVICE_CHECK" title="All Service Checks" xsi:type="service-check">
+ <skippable>true</skippable>
+ <direction>UPGRADE</direction>
+ <priority>
+ <service>HDFS</service>
+ <service>YARN</service>
+ <service>MAPREDUCE2</service>
+ <service>HBASE</service>
+ </priority>
+ </group>
+
+ <group xsi:type="restart" name="HIVE" title="Hive">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="HIVE">
+ <component>HIVE_METASTORE</component>
+ <component>HIVE_SERVER</component>
+ <component>WEBHCAT_SERVER</component>
+ <component>HIVE_CLIENT</component>
+ <component>HCAT</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="SPARK" title="Spark">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="SPARK">
+ <component>SPARK_JOBHISTORYSERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="SPARK_CLIENTS" title="Spark Clients">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="SPARK">
+ <component>SPARK_CLIENT</component>
+ </service>
+ </group>
+
+ <!-- Upgrade Oozie DB only on Upgrade direction, and always create a new ShareLib. -->
+ <group name="UPGRADE_OOZIE" title="Upgrade Oozie Database">
+ <direction>UPGRADE</direction>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Upgrade Oozie Database">
+ <task xsi:type="execute" hosts="any" summary="Upgrading the database and creating a new sharelib">
+ <script>scripts/oozie_server_upgrade.py</script>
+ <function>upgrade_oozie_database_and_sharelib</function>
+ </task>
+ </execute-stage>
+ </group>
+
+ <!-- Only create the ShareLib folder during a Downgrade. -->
+ <group name="DOWNGRADE_OOZIE" title="Downgrade Oozie ShareLib">
+ <direction>DOWNGRADE</direction>
+ <skippable>true</skippable>
+ <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Downgrade Oozie ShareLib">
+ <task xsi:type="execute" hosts="any" summary="Create a new sharelib">
+ <script>scripts/oozie_server_upgrade.py</script>
+ <function>create_sharelib</function>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="restart" name="OOZIE" title="Oozie">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="OOZIE">
+ <component>OOZIE_SERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="OOZIE_CLIENTS" title="Oozie Clients">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="OOZIE">
+ <component>OOZIE_CLIENT</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="FALCON" title="Falcon">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="FALCON">
+ <component>FALCON_SERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="FALCON_CLIENTS" title="Falcon Clients">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="FALCON">
+ <component>FALCON_CLIENT</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="KAFKA" title="Kafka">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="KAFKA">
+ <component>KAFKA_BROKER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="KNOX" title="Knox">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="KNOX">
+ <component>KNOX_GATEWAY</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="STORM" title="Storm">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="STORM">
+ <component>NIMBUS</component>
+ <component>SUPERVISOR</component>
+ <component>STORM_UI_SERVER</component>
+ <component>DRPC_SERVER</component>
+ </service>
+
+ <execute-stage service="STORM" component="DRPC_SERVER" title="Rebuild Storm Topology">
+ <task xsi:type="manual">
+ <message>Please rebuild your topology using the new Storm version dependencies and resubmit it using the newly created jar.</message>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="restart" name="SLIDER" title="Slider">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="SLIDER">
+ <component>SLIDER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="FLUME" title="Flume">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="FLUME">
+ <component>FLUME_HANDLER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="ACCUMULO" title="Accumulo">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="ACCUMULO">
+ <component>ACCUMULO_MASTER</component>
+ <component>ACCUMULO_TSERVER</component>
+ <component>ACCUMULO_MONITOR</component>
+ <component>ACCUMULO_GC</component>
+ <component>ACCUMULO_TRACER</component>
+ <component>ACCUMULO_CLIENT</component>
+ </service>
+ </group>
+
+ <!--
+ Invoke "hdp-select set all" to change any components we may have missed
+ that are installed on the hosts but not known by Ambari.
+ -->
+ <group xsi:type="cluster" name="ALL_HOST_OPS" title="Set Version On All Hosts">
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+
+ <execute-stage title="Update stack to {{version}}">
+ <task xsi:type="execute">
+ <script>scripts/ru_set_all.py</script>
+ <function>actionexecute</function>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check">
+ <direction>UPGRADE</direction>
+
+ <execute-stage title="Check Component Versions">
+ <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" />
+ </execute-stage>
+ </group>
+
+ <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+
+ <execute-stage title="Confirm Finalize">
+ <direction>UPGRADE</direction>
+ <task xsi:type="manual">
+ <message>Please confirm you are ready to finalize.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS Finalize">
+ <task xsi:type="execute" hosts="master">
+ <script>scripts/namenode.py</script>
+ <function>finalize_non_rolling_upgrade</function>
+ </task>
+ </execute-stage>
+
+ <execute-stage title="Save Cluster State" service="" component="">
+ <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction">
+ </task>
+ </execute-stage>
+ </group>
+ </order>
+
+ <processing>
+ <service name="ZOOKEEPER">
+ <component name="ZOOKEEPER_SERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="ZOOKEEPER_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="RANGER">
+ <component name="RANGER_ADMIN">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="RANGER_USERSYNC">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="HDFS">
+ <component name="NAMENODE">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="DATANODE">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="HDFS_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="JOURNALNODE">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="ZKFC">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="MAPREDUCE2">
+ <component name="HISTORYSERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="MAPREDUCE2_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="YARN">
+ <component name="APP_TIMELINE_SERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="RESOURCEMANAGER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="NODEMANAGER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="YARN_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="HBASE">
+ <component name="HBASE_MASTER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="HBASE_REGIONSERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="HBASE_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="TEZ">
+ <component name="TEZ_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="PIG">
+ <component name="PIG">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="SQOOP">
+ <component name="SQOOP">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="HIVE">
+ <component name="HIVE_METASTORE">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="HIVE_SERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="WEBHCAT_SERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="HIVE_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="HCAT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="SPARK">
+ <component name="SPARK_JOBHISTORYSERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ <component name="SPARK_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="OOZIE">
+ <component name="OOZIE_SERVER">
+ <pre-upgrade>
+ <task xsi:type="execute" hosts="any" summary="Upgrading the database and creating a new sharelib">
+ <script>scripts/oozie_server_upgrade.py</script>
+ <function>upgrade_oozie_database_and_sharelib</function>
+ </task>
+ </pre-upgrade>
+
+ <pre-downgrade>
+ <task xsi:type="execute" hosts="any" summary="Create a new sharelib">
+ <script>scripts/oozie_server_upgrade.py</script>
+ <function>create_sharelib</function>
+ </task>
+ </pre-downgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="OOZIE_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="FALCON">
+ <component name="FALCON_SERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ <component name="FALCON_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="KAFKA">
+ <component name="KAFKA_BROKER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="KNOX">
+ <component name="KNOX_GATEWAY">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="STORM">
+ <component name="NIMBUS">
+ <pre-upgrade>
+ <task xsi:type="execute" summary="Removing Storm data from ZooKeeper">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_zookeeper_data</function>
+ </task>
+
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-upgrade>
+
+ <pre-downgrade>
+ <task xsi:type="manual">
+ <message>Before continuing, please deactivate and kill any currently running topologies.</message>
+ </task>
+
+ <task xsi:type="execute" summary="Removing Storm data from ZooKeeper">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_zookeeper_data</function>
+ </task>
+
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-downgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="SUPERVISOR">
+ <pre-upgrade>
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-upgrade>
+
+ <pre-downgrade>
+ <task xsi:type="manual">
+ <message>Before continuing, please deactivate and kill any currently running topologies.</message>
+ </task>
+
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-downgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="STORM_UI_SERVER">
+ <pre-upgrade>
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-upgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="DRPC_SERVER">
+ <pre-upgrade>
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-upgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+
+ <post-upgrade>
+ <task xsi:type="manual">
+ <message>Please rebuild your topology using the new Storm version dependencies and resubmit it using the newly created jar.</message>
+ </task>
+ </post-upgrade>
+ </component>
+ </service>
+
+ <service name="SLIDER">
+ <component name="SLIDER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="FLUME">
+ <component name="FLUME_HANDLER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+ </processing>
+</upgrade>