You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by rl...@apache.org on 2015/03/03 16:00:06 UTC
ambari git commit: AMBARI-9852. Kerberos: Kerberos Service Check
needs to generate and destroy its own unique identity for testing (rlevas)
Repository: ambari
Updated Branches:
refs/heads/trunk c313023bd -> 954b96e1e
AMBARI-9852. Kerberos: Kerberos Service Check needs to generate and destroy its own unique identity for testing (rlevas)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/954b96e1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/954b96e1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/954b96e1
Branch: refs/heads/trunk
Commit: 954b96e1edf8fc313ce06d57928b3e26e714770a
Parents: c313023
Author: Robert Levas <rl...@hortonworks.com>
Authored: Tue Mar 3 09:59:50 2015 -0500
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Tue Mar 3 09:59:59 2015 -0500
----------------------------------------------------------------------
.../AmbariManagementControllerImpl.java | 51 +-
.../server/controller/KerberosHelper.java | 636 ++++++++++++++-----
.../1.10.3-10/package/scripts/params.py | 9 +-
.../1.10.3-10/package/scripts/service_check.py | 46 +-
.../server/controller/KerberosHelperTest.java | 393 ++++++++++++
5 files changed, 946 insertions(+), 189 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/954b96e1/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 2bf0cbf..dcbbed2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -40,7 +40,6 @@ import java.io.IOException;
import java.net.InetAddress;
import java.text.MessageFormat;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumMap;
@@ -55,6 +54,7 @@ import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
+import com.google.gson.reflect.TypeToken;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.ClusterNotFoundException;
import org.apache.ambari.server.DuplicateResourceException;
@@ -2902,23 +2902,30 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
actionManager,
actionRequest);
+ ExecuteCommandJson jsons = customCommandExecutionHelper.getCommandJson(actionExecContext, cluster);
+ String commandParamsForStage = jsons.getCommandParamsForStage();
+
// If the request is to perform the Kerberos service check, set up the stages to
// ensure that the (cluster-level) smoke user principal and keytab is available on all hosts
- if (Role.KERBEROS_SERVICE_CHECK.name().equals(actionRequest.getCommandName())) {
+ boolean kerberosServiceCheck = Role.KERBEROS_SERVICE_CHECK.name().equals(actionRequest.getCommandName());
+ if (kerberosServiceCheck) {
+ // Parse the command parameters into a map so that additional values may be added to it
+ Map<String, String> commandParamsStage = gson.fromJson(commandParamsForStage,
+ new TypeToken<Map<String, String>>() {
+ }.getType());
+
try {
- requestStageContainer = kerberosHelper.ensureIdentities(cluster,
- Collections.<String, Collection<String>>singletonMap(Service.Type.KERBEROS.name(), null),
- Collections.singleton("/smokeuser"), requestStageContainer);
+ requestStageContainer = kerberosHelper.createTestIdentity(cluster, commandParamsStage, requestStageContainer);
} catch (KerberosOperationException e) {
throw new IllegalArgumentException(e.getMessage(), e);
}
- }
- ExecuteCommandJson jsons = customCommandExecutionHelper.getCommandJson(
- actionExecContext, cluster);
+ // Recreate commandParamsForStage with the added values
+ commandParamsForStage = gson.toJson(commandParamsStage);
+ }
Stage stage = createNewStage(requestStageContainer.getLastStageId(), cluster, requestId, requestContext,
- jsons.getClusterHostInfo(), jsons.getCommandParamsForStage(), jsons.getHostParamsForStage());
+ jsons.getClusterHostInfo(), commandParamsForStage, jsons.getHostParamsForStage());
if (actionRequest.isCommand()) {
customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage, requestProperties, false);
@@ -2938,18 +2945,28 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
List<Stage> stages = rg.getStages();
if (stages != null && !stages.isEmpty()) {
+ // If this is a Kerberos service check, set the service check stage(s) to be skip-able so that
+ // the clean up stages will still be triggered in the event of a failure.
+ if (kerberosServiceCheck) {
+ for (Stage s : stages) {
+ s.setSkippable(true);
+ }
+ }
+
requestStageContainer.addStages(stages);
}
- // If the request is to perform the Kerberos service check and (Kerberos) security is not enabled,
- // delete that the (cluster-level) smoke user principal and keytab that was created for the
- // service check
- if (Role.KERBEROS_SERVICE_CHECK.name().equals(actionRequest.getCommandName()) &&
- !kerberosHelper.isClusterKerberosEnabled(cluster)) {
+ // If the request is to perform the Kerberos service check, delete the test-specific principal
+ // and keytab that was created for this service check
+ if (kerberosServiceCheck) {
+ // Parse the command parameters into a map so that existing values may be accessed and
+ // additional values may be added to it.
+ Map<String, String> commandParamsStage = gson.fromJson(commandParamsForStage,
+ new TypeToken<Map<String, String>>() {
+ }.getType());
+
try {
- requestStageContainer = kerberosHelper.deleteIdentities(cluster,
- Collections.<String, Collection<String>>singletonMap(Service.Type.KERBEROS.name(), null),
- Collections.singleton("/smokeuser"), requestStageContainer);
+ requestStageContainer = kerberosHelper.deleteTestIdentity(cluster, commandParamsStage, requestStageContainer);
} catch (KerberosOperationException e) {
throw new IllegalArgumentException(e.getMessage(), e);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/954b96e1/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
index 8dd6c4d..c4a5f4f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
@@ -107,6 +107,7 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
+import java.util.Random;
import java.util.Set;
public class KerberosHelper {
@@ -124,6 +125,12 @@ public class KerberosHelper {
*/
private static final String SECURITY_ENABLED_PROPERTY_NAME = "security_enabled";
+ /**
+ * name of the property used to hold the service check identifier value, used when creating and
+ * destroying the (unique) service check identity.
+ */
+ private static final String SERVICE_CHECK_IDENTIFIER = "_kerberos_internal_service_check_identifier";
+
@Inject
private AmbariCustomCommandExecutionHelper customCommandExecutionHelper;
@@ -262,7 +269,6 @@ public class KerberosHelper {
return requestStageContainer;
}
-
/**
* Ensures the set of filtered principals and keytabs exist on the cluster.
* <p/>
@@ -400,6 +406,133 @@ public class KerberosHelper {
}
/**
+ * Create a unique identity to use for testing the general Kerberos configuration.
+ *
+ * @param cluster the relevant Cluster
+ * @param commandParamsStage the command parameters map that will be sent to the agent side command
+ * @param requestStageContainer a RequestStageContainer to place generated stages, if needed -
+ * if null a new RequestStageContainer will be created.
+ * @return the updated or a new RequestStageContainer containing the stages that need to be
+ * executed to complete this task; or null if no stages need to be executed.
+ */
+ public RequestStageContainer createTestIdentity(Cluster cluster, Map<String, String> commandParamsStage,
+ RequestStageContainer requestStageContainer)
+ throws KerberosOperationException, AmbariException {
+ return handleTestIdentity(cluster, getKerberosDetails(cluster), commandParamsStage, requestStageContainer, new CreatePrincipalsAndKeytabsHandler(false));
+ }
+
+ /**
+ * Deletes the unique identity to use for testing the general Kerberos configuration.
+ *
+ * @param cluster the relevant Cluster
+ * @param commandParamsStage the command parameters map that will be sent to the agent side command
+ * @param requestStageContainer a RequestStageContainer to place generated stages, if needed -
+ * if null a new RequestStageContainer will be created.
+ * @return the updated or a new RequestStageContainer containing the stages that need to be
+ * executed to complete this task; or null if no stages need to be executed.
+ */
+ public RequestStageContainer deleteTestIdentity(Cluster cluster, Map<String, String> commandParamsStage,
+ RequestStageContainer requestStageContainer)
+ throws KerberosOperationException, AmbariException {
+ requestStageContainer = handleTestIdentity(cluster, getKerberosDetails(cluster), commandParamsStage, requestStageContainer, new DeletePrincipalsAndKeytabsHandler());
+
+ // Clear the Kerberos service check identifier
+ setKerberosServiceCheckIdentifier(cluster, null);
+
+ return requestStageContainer;
+ }
+
+ /**
+ * Validate the KDC admin credentials.
+ *
+ * @param cluster associated cluster
+ *
+ * @throws AmbariException if any other error occurs while trying to validate the credentials
+ */
+ public void validateKDCCredentials(Cluster cluster) throws KerberosMissingAdminCredentialsException,
+ KerberosAdminAuthenticationException,
+ KerberosInvalidConfigurationException,
+ AmbariException {
+ String credentials = getEncryptedAdministratorCredentials(cluster);
+ if (credentials == null) {
+ throw new KerberosMissingAdminCredentialsException(
+ "Missing KDC administrator credentials.\n" +
+ "The KDC administrator credentials must be set in session by updating the relevant Cluster resource." +
+ "This may be done by issuing a PUT to the api/v1/clusters/(cluster name) API entry point with the following payload:\n" +
+ "{\n" +
+ " \"session_attributes\" : {\n" +
+ " \"kerberos_admin\" : {\"principal\" : \"(PRINCIPAL)\", \"password\" : \"(PASSWORD)\"}\n" +
+ " }\n" +
+ "}"
+ );
+ } else {
+ KerberosDetails kerberosDetails = getKerberosDetails(cluster);
+ KerberosOperationHandler operationHandler = kerberosOperationHandlerFactory.getKerberosOperationHandler(kerberosDetails.getKdcType());
+
+ if (operationHandler == null) {
+ throw new AmbariException("Failed to get an appropriate Kerberos operation handler.");
+ } else {
+ byte[] key = Integer.toHexString(cluster.hashCode()).getBytes();
+ KerberosCredential kerberosCredentials = KerberosCredential.decrypt(credentials, key);
+
+ boolean missingCredentials = false;
+ try {
+ operationHandler.open(kerberosCredentials, kerberosDetails.getDefaultRealm(), kerberosDetails.getKerberosEnvProperties());
+ // todo: this is really odd that open doesn't throw an exception if the credentials are missing
+ missingCredentials = ! operationHandler.testAdministratorCredentials();
+ } catch (KerberosAdminAuthenticationException e) {
+ throw new KerberosAdminAuthenticationException(
+ "Invalid KDC administrator credentials.\n" +
+ "The KDC administrator credentials must be set in session by updating the relevant Cluster resource." +
+ "This may be done by issuing a PUT to the api/v1/clusters/(cluster name) API entry point with the following payload:\n" +
+ "{\n" +
+ " \"session_attributes\" : {\n" +
+ " \"kerberos_admin\" : {\"principal\" : \"(PRINCIPAL)\", \"password\" : \"(PASSWORD)\"}\n" +
+ " }\n" +
+ "}", e);
+ } catch (KerberosKDCConnectionException e) {
+ throw new KerberosInvalidConfigurationException(
+ "Failed to connect to KDC - " + e.getMessage() + "\n" +
+ "Update the KDC settings in krb5-conf and kerberos-env configurations to correct this issue.",
+ e);
+ } catch (KerberosRealmException e) {
+ throw new KerberosInvalidConfigurationException(
+ "Failed to find a KDC for the specified realm - " + e.getMessage() + "\n" +
+ "Update the KDC settings in krb5-conf and kerberos-env configurations to correct this issue.",
+ e);
+ } catch (KerberosLDAPContainerException e) {
+ throw new KerberosInvalidConfigurationException(
+ "The principal container was not specified\n" +
+ "Set the 'container_dn' value in the kerberos-env configuration to correct this issue.",
+ e);
+ } catch (KerberosOperationException e) {
+ throw new AmbariException(e.getMessage(), e);
+ } finally {
+ try {
+ operationHandler.close();
+ } catch (KerberosOperationException e) {
+ // Ignore this...
+ }
+ }
+
+ // need to throw this outside of the try/catch so it isn't caught
+ if (missingCredentials) {
+ throw new KerberosMissingAdminCredentialsException(
+ "Invalid KDC administrator credentials.\n" +
+ "The KDC administrator credentials must be set in session by updating the relevant Cluster resource." +
+ "This may be done by issuing a PUT to the api/v1/clusters/(cluster name) API entry point with the following payload:\n" +
+ "{\n" +
+ " \"session_attributes\" : {\n" +
+ " \"kerberos_admin\" : {\"principal\" : \"(PRINCIPAL)\", \"password\" : \"(PASSWORD)\"}\n" +
+ " }\n" +
+ "}"
+ );
+ }
+ }
+ }
+ }
+
+ /**
* Sets the relevant auth-to-local rule configuration properties using the services installed on
* the cluster and their relevant Kerberos descriptors to determine the rules to be created.
*
@@ -534,7 +667,6 @@ public class KerberosHelper {
if ((hosts != null) && !hosts.isEmpty()) {
List<ServiceComponentHost> serviceComponentHostsToProcess = new ArrayList<ServiceComponentHost>();
- File indexFile;
KerberosDescriptor kerberosDescriptor = getKerberosDescriptor(cluster);
KerberosActionDataFileBuilder kerberosActionDataFileBuilder = null;
Map<String, String> kerberosDescriptorProperties = kerberosDescriptor.getProperties();
@@ -549,17 +681,10 @@ public class KerberosHelper {
// such as which principals and keytabs files to create as well as what configurations need
// to be update are stored in data files in this directory. Any keytab files are stored in
// this directory until they are distributed to their appropriate hosts.
- File dataDirectory;
- try {
- dataDirectory = createTemporaryDirectory();
- } catch (IOException e) {
- String message = "Failed to create the temporary data directory.";
- LOG.error(message, e);
- throw new AmbariException(message, e);
- }
+ File dataDirectory = createTemporaryDirectory();
// Create the file used to store details about principals and keytabs to create
- indexFile = new File(dataDirectory, KerberosActionDataFile.DATA_FILE_NAME);
+ File indexFile = new File(dataDirectory, KerberosActionDataFile.DATA_FILE_NAME);
try {
// Iterate over the hosts in the cluster to find the components installed in each. For each
@@ -711,25 +836,8 @@ public class KerberosHelper {
requestStageContainer, serviceComponentHostsToProcess, hostsWithValidKerberosClient);
// Add the cleanup stage...
- Map<String, String> commandParameters = new HashMap<String, String>();
- commandParameters.put(KerberosServerAction.AUTHENTICATED_USER_NAME, ambariManagementController.getAuthName());
- commandParameters.put(KerberosServerAction.DATA_DIRECTORY, dataDirectory.getAbsolutePath());
-
- Stage stage = createServerActionStage(requestStageContainer.getLastStageId(),
- cluster,
- requestStageContainer.getId(),
- "Finalize Operations",
- clusterHostInfoJson,
- "{}",
- hostParamsJson,
- FinalizeKerberosServerAction.class,
- event,
- commandParameters,
- "Finalize Operations", 300);
-
- RoleGraph roleGraph = new RoleGraph(roleCommandOrder);
- roleGraph.build(stage);
- requestStageContainer.addStages(roleGraph.getStages());
+ handler.addFinalizeOperationStage(cluster, clusterHostInfoJson, hostParamsJson, event,
+ dataDirectory, roleCommandOrder, requestStageContainer);
// If all goes well, set the appropriate states on the relevant ServiceComponentHosts
for (ServiceComponentHost sch : serviceComponentHostsToProcess) {
@@ -763,95 +871,218 @@ public class KerberosHelper {
}
/**
- * Validate the KDC admin credentials.
+ * Performs operations needed to process Kerberos related tasks to manage a (unique) test identity
+ * on the relevant cluster.
*
- * @param cluster associated cluster
- *
- * @throws AmbariException if any other error occurs while trying to validate the credentials
+ * @param cluster the relevant Cluster
+ * @param kerberosDetails a KerberosDetails containing information about relevant Kerberos
+ * configuration
+ * @param commandParameters the command parameters map used to read and/or write attributes
+ * related to this operation
+ * @param requestStageContainer a RequestStageContainer to place generated stages, if needed -
+ * if null a new RequestStageContainer will be created.
+ * @param handler a Handler to use to provide guidance and set up stages
+ * to perform the work needed to complete the relative action
+ * @return the updated or a new RequestStageContainer containing the stages that need to be
+ * executed to complete this task; or null if no stages need to be executed.
+ * @throws AmbariException
+ * @throws KerberosOperationException
*/
- public void validateKDCCredentials(Cluster cluster) throws KerberosMissingAdminCredentialsException,
- KerberosAdminAuthenticationException,
- KerberosInvalidConfigurationException,
- AmbariException {
- String credentials = getEncryptedAdministratorCredentials(cluster);
- if (credentials == null) {
- throw new KerberosMissingAdminCredentialsException(
- "Missing KDC administrator credentials.\n" +
- "The KDC administrator credentials must be set in session by updating the relevant Cluster resource." +
- "This may be done by issuing a PUT to the api/v1/clusters/(cluster name) API entry point with the following payload:\n" +
- "{\n" +
- " \"session_attributes\" : {\n" +
- " \"kerberos_admin\" : {\"principal\" : \"(PRINCIPAL)\", \"password\" : \"(PASSWORD)\"}\n" +
- " }\n" +
- "}"
- );
- } else {
- KerberosDetails kerberosDetails = getKerberosDetails(cluster);
- KerberosOperationHandler operationHandler = kerberosOperationHandlerFactory.getKerberosOperationHandler(kerberosDetails.getKdcType());
+ private RequestStageContainer handleTestIdentity(Cluster cluster,
+ KerberosDetails kerberosDetails,
+ Map<String, String> commandParameters, RequestStageContainer requestStageContainer,
+ Handler handler) throws AmbariException, KerberosOperationException {
- if (operationHandler == null) {
- throw new AmbariException("Failed to get an appropriate Kerberos operation handler.");
- } else {
- byte[] key = Integer.toHexString(cluster.hashCode()).getBytes();
- KerberosCredential kerberosCredentials = KerberosCredential.decrypt(credentials, key);
+ if(commandParameters == null) {
+ throw new AmbariException("The properties map must not be null. It is needed to store data related to the service check identity");
+ }
+
+ Map<String, Service> services = cluster.getServices();
+
+ if ((services != null) && !services.isEmpty()) {
+ String clusterName = cluster.getClusterName();
+ Map<String, Host> hosts = clusters.getHostsForCluster(clusterName);
+
+ if ((hosts != null) && !hosts.isEmpty()) {
+ List<ServiceComponentHost> serviceComponentHostsToProcess = new ArrayList<ServiceComponentHost>();
+ KerberosDescriptor kerberosDescriptor = getKerberosDescriptor(cluster);
+ KerberosActionDataFileBuilder kerberosActionDataFileBuilder = null;
+ Map<String, String> kerberosDescriptorProperties = kerberosDescriptor.getProperties();
+
+ // While iterating over all the ServiceComponentHosts find hosts that have KERBEROS_CLIENT
+ // components in the INSTALLED state and add them to the hostsWithValidKerberosClient Set.
+ // This is needed to help determine which hosts to perform actions for and create tasks for.
+ Set<String> hostsWithValidKerberosClient = new HashSet<String>();
+
+ // Create a temporary directory to store metadata needed to complete this task. Information
+ // such as which principals and keytabs files to create as well as what configurations need
+ // to be updated are stored in data files in this directory. Any keytab files are stored in
+ // this directory until they are distributed to their appropriate hosts.
+ File dataDirectory = createTemporaryDirectory();
+
+ // Create the file used to store details about principals and keytabs to create
+ File indexFile = new File(dataDirectory, KerberosActionDataFile.DATA_FILE_NAME);
+
+ // Create a special identity for the test user
+ KerberosIdentityDescriptor identity = new KerberosIdentityDescriptor(new HashMap<String, Object>() {
+ {
+ put("principal",
+ new HashMap<String, Object>() {
+ {
+ put("value", "${cluster-env/smokeuser}_${service_check_id}@${realm}");
+ put("type", "user");
+ }
+ });
+ put("keytab",
+ new HashMap<String, Object>() {
+ {
+ put("file", "${keytab_dir}/kerberos.service_check.${service_check_id}.keytab");
+
+ put("owner", new HashMap<String, Object>() {{
+ put("name", "${cluster-env/smokeuser}");
+ put("access", "rw");
+ }});
+
+ put("group", new HashMap<String, Object>() {{
+ put("name", "${cluster-env/user_group}");
+ put("access", "r");
+ }});
+ }
+ });
+ }
+ });
+
+ // Get or create the unique service check identifier
+ String serviceCheckId = getKerberosServiceCheckIdentifier(cluster, true);
- boolean missingCredentials = false;
try {
- operationHandler.open(kerberosCredentials, kerberosDetails.getDefaultRealm(), kerberosDetails.getKerberosEnvProperties());
- // todo: this is really odd that open doesn't throw an exception if the credentials are missing
- missingCredentials = ! operationHandler.testAdministratorCredentials();
- } catch (KerberosAdminAuthenticationException e) {
- throw new KerberosAdminAuthenticationException(
- "Invalid KDC administrator credentials.\n" +
- "The KDC administrator credentials must be set in session by updating the relevant Cluster resource." +
- "This may be done by issuing a PUT to the api/v1/clusters/(cluster name) API entry point with the following payload:\n" +
- "{\n" +
- " \"session_attributes\" : {\n" +
- " \"kerberos_admin\" : {\"principal\" : \"(PRINCIPAL)\", \"password\" : \"(PASSWORD)\"}\n" +
- " }\n" +
- "}", e);
- } catch (KerberosKDCConnectionException e) {
- throw new KerberosInvalidConfigurationException(
- "Failed to connect to KDC - " + e.getMessage() + "\n" +
- "Update the KDC settings in krb5-conf and kerberos-env configurations to correct this issue.",
- e);
- } catch (KerberosRealmException e) {
- throw new KerberosInvalidConfigurationException(
- "Failed to find a KDC for the specified realm - " + e.getMessage() + "\n" +
- "Update the KDC settings in krb5-conf and kerberos-env configurations to correct this issue.",
- e);
- } catch (KerberosLDAPContainerException e) {
- throw new KerberosInvalidConfigurationException(
- "The principal container was not specified\n" +
- "Set the 'container_dn' value in the kerberos-env configuration to correct this issue.",
- e);
- } catch (KerberosOperationException e) {
- throw new AmbariException(e.getMessage(), e);
+ // Iterate over the hosts in the cluster to find the components installed in each. For each
+ // component (aka service component host - sch) determine the configuration updates and
+ // and the principals and keytabs to create.
+ for (Host host : hosts.values()) {
+ String hostname = host.getHostName();
+
+ // Get a list of components on the current host
+ List<ServiceComponentHost> serviceComponentHosts = cluster.getServiceComponentHosts(hostname);
+
+ if ((serviceComponentHosts != null) && !serviceComponentHosts.isEmpty()) {
+ // Calculate the current host-specific configurations. These will be used to replace
+ // variables within the Kerberos descriptor data
+ Map<String, Map<String, String>> configurations = calculateConfigurations(cluster, hostname, kerberosDescriptorProperties);
+
+ // Set the unique service check identifier
+ configurations.get("").put("service_check_id", serviceCheckId);
+
+ // Iterate over the components installed on the current host to get the service and
+ // component-level Kerberos descriptors in order to determine which principals,
+ // keytab files, and configurations need to be created or updated.
+ for (ServiceComponentHost sch : serviceComponentHosts) {
+ String serviceName = sch.getServiceName();
+ String componentName = sch.getServiceComponentName();
+
+ // If the current ServiceComponentHost represents the KERBEROS/KERBEROS_CLIENT and
+ // indicates that the KERBEROS_CLIENT component is in the INSTALLED state, add the
+ // current host to the set of hosts that should be handled...
+ if(Service.Type.KERBEROS.name().equals(serviceName) &&
+ Role.KERBEROS_CLIENT.name().equals(componentName) &&
+ (sch.getState() == State.INSTALLED)) {
+ hostsWithValidKerberosClient.add(hostname);
+
+ int identitiesAdded = 0;
+
+ // Lazily create the KerberosActionDataFileBuilder instance...
+ if (kerberosActionDataFileBuilder == null) {
+ kerberosActionDataFileBuilder = new KerberosActionDataFileBuilder(indexFile);
+ }
+
+ // Add service-level principals (and keytabs)
+ identitiesAdded += addIdentities(kerberosActionDataFileBuilder, Collections.singleton(identity),
+ null, hostname, serviceName, componentName, configurations);
+
+ if (identitiesAdded > 0) {
+ // Add the relevant principal name and keytab file data to the command params state
+ if(!commandParameters.containsKey("principal_name") || !commandParameters.containsKey("keytab_file")) {
+ commandParameters.put("principal_name",
+ KerberosDescriptor.replaceVariables(identity.getPrincipalDescriptor().getValue(), configurations));
+ commandParameters.put("keytab_file",
+ KerberosDescriptor.replaceVariables(identity.getKeytabDescriptor().getFile(), configurations));
+ }
+
+ serviceComponentHostsToProcess.add(sch);
+ }
+ }
+ }
+ }
+ }
+ } catch (IOException e) {
+ String message = String.format("Failed to write index file - %s", indexFile.getAbsolutePath());
+ LOG.error(message);
+ throw new AmbariException(message, e);
} finally {
+ if (kerberosActionDataFileBuilder != null) {
+ // Make sure the data file is closed
+ try {
+ kerberosActionDataFileBuilder.close();
+ } catch (IOException e) {
+ LOG.warn("Failed to close the index file writer", e);
+ }
+ }
+ }
+
+ // If there are ServiceComponentHosts to process, make sure the administrator credentials
+ // are available
+ if (!serviceComponentHostsToProcess.isEmpty()) {
try {
- operationHandler.close();
+ validateKDCCredentials(cluster);
} catch (KerberosOperationException e) {
- // Ignore this...
+ try {
+ FileUtils.deleteDirectory(dataDirectory);
+ } catch (Throwable t) {
+ LOG.warn(String.format("The data directory (%s) was not deleted due to an error condition - {%s}",
+ dataDirectory.getAbsolutePath(), t.getMessage()), t);
+ }
+
+ throw e;
}
}
- // need to throw this outside of the try/catch so it isn't caught
- if (missingCredentials) {
- throw new KerberosMissingAdminCredentialsException(
- "Invalid KDC administrator credentials.\n" +
- "The KDC administrator credentials must be set in session by updating the relevant Cluster resource." +
- "This may be done by issuing a PUT to the api/v1/clusters/(cluster name) API entry point with the following payload:\n" +
- "{\n" +
- " \"session_attributes\" : {\n" +
- " \"kerberos_admin\" : {\"principal\" : \"(PRINCIPAL)\", \"password\" : \"(PASSWORD)\"}\n" +
- " }\n" +
- "}"
- );
+ // Always set up the necessary stages to perform the tasks needed to complete the operation.
+ // Some stages may be no-ops, this is expected.
+ // Gather data needed to create stages and tasks...
+ Map<String, Set<String>> clusterHostInfo = StageUtils.getClusterHostInfo(hosts, cluster);
+ String clusterHostInfoJson = StageUtils.getGson().toJson(clusterHostInfo);
+ Map<String, String> hostParams = customCommandExecutionHelper.createDefaultHostParams(cluster);
+ String hostParamsJson = StageUtils.getGson().toJson(hostParams);
+ String ambariServerHostname = StageUtils.getHostName();
+ ServiceComponentHostServerActionEvent event = new ServiceComponentHostServerActionEvent(
+ "AMBARI_SERVER",
+ ambariServerHostname, // TODO: Choose a random hostname from the cluster. All tasks for the AMBARI_SERVER service will be executed on this Ambari server
+ System.currentTimeMillis());
+ RoleCommandOrder roleCommandOrder = ambariManagementController.getRoleCommandOrder(cluster);
+
+ // If a RequestStageContainer does not already exist, create a new one...
+ if (requestStageContainer == null) {
+ requestStageContainer = new RequestStageContainer(
+ actionManager.getNextRequestId(),
+ null,
+ requestFactory,
+ actionManager);
}
+
+ // Use the handler implementation to setup the relevant stages.
+ handler.createStages(cluster, hosts, Collections.<String, Map<String, String>>emptyMap(),
+ clusterHostInfoJson, hostParamsJson, event, roleCommandOrder, kerberosDetails,
+ dataDirectory, requestStageContainer, serviceComponentHostsToProcess, hostsWithValidKerberosClient);
+
+ handler.addFinalizeOperationStage(cluster, clusterHostInfoJson, hostParamsJson, event,
+ dataDirectory, roleCommandOrder, requestStageContainer);
}
}
+
+ return requestStageContainer;
}
+
/**
* Gathers the Kerberos-related data from configurations and stores it in a new KerberosDetails
* instance.
@@ -1028,41 +1259,47 @@ public class KerberosHelper {
// -------------------------------
}
-
/**
* Creates a temporary directory within the system temporary directory
* <p/>
* The resulting directory is to be removed by the caller when desired.
*
* @return a File pointing to the new temporary directory, or null if one was not created
- * @throws java.io.IOException if a new temporary directory cannot be created
+ * @throws AmbariException if a new temporary directory cannot be created
*/
- private File createTemporaryDirectory() throws IOException {
+ private File createTemporaryDirectory() throws AmbariException {
String tempDirectoryPath = System.getProperty("java.io.tmpdir");
- if (tempDirectoryPath == null) {
- throw new IOException("The System property 'java.io.tmpdir' does not specify a temporary directory");
- }
+ try {
+ if (tempDirectoryPath == null) {
+ throw new IOException("The System property 'java.io.tmpdir' does not specify a temporary directory");
+ }
- File directory;
- int tries = 0;
- long now = System.currentTimeMillis();
+ File directory;
+ int tries = 0;
+ long now = System.currentTimeMillis();
- do {
- directory = new File(tempDirectoryPath, String.format("%s%d-%d.d",
- KerberosServerAction.DATA_DIRECTORY_PREFIX, now, tries));
+ do {
+ directory = new File(tempDirectoryPath, String.format("%s%d-%d.d",
+ KerberosServerAction.DATA_DIRECTORY_PREFIX, now, tries));
- if ((directory.exists()) || !directory.mkdirs()) {
- directory = null; // Rest and try again...
- } else {
- LOG.debug("Created temporary directory: {}", directory.getAbsolutePath());
+ if ((directory.exists()) || !directory.mkdirs()) {
+ directory = null; // Reset and try again...
+ } else {
+ LOG.debug("Created temporary directory: {}", directory.getAbsolutePath());
+ }
+ } while ((directory == null) && (++tries < 100));
+
+ if (directory == null) {
+ throw new IOException(String.format("Failed to create a temporary directory in %s", tempDirectoryPath));
}
- } while ((directory == null) && (++tries < 100));
- if (directory == null) {
- throw new IOException(String.format("Failed to create a temporary directory in %s", tempDirectoryPath));
+ return directory;
+ }
+ catch (IOException e) {
+ String message = "Failed to create the temporary data directory.";
+ LOG.error(message, e);
+ throw new AmbariException(message, e);
}
-
- return directory;
}
/**
@@ -1420,6 +1657,60 @@ public class KerberosHelper {
}
/**
+ * Using the session data from the relevant Cluster object, gets the previously stored
+ * Kerberos service check identifier value or creates a new one if indicated to do so.
+ * <p/>
+ * This value is intended to be used by the KerberosHelper to manage uniquely created
+ * principals for use in service checks.
+ *
+ * @param cluster the relevant Cluster
+ * @return the previously stored Kerberos service check identifier value, or null if
+ * not previously stored
+ */
+ private String getKerberosServiceCheckIdentifier(Cluster cluster, boolean createIfNull) {
+ Map<String, Object> sessionAttributes = cluster.getSessionAttributes();
+ Object value = (sessionAttributes == null) ? null : sessionAttributes.get(SERVICE_CHECK_IDENTIFIER);
+ String serviceCheckIdentifier = (value instanceof String) ? (String) value : null;
+
+ if ((serviceCheckIdentifier == null) && createIfNull) {
+ // Create a new (ideally) unique(ish) identifier
+ Random random = new Random(System.currentTimeMillis());
+ char[] chars = new char[8];
+
+ for (int i = 0; i < 8; i++) {
+ chars[i] = (char) ((int) 'a' + random.nextInt(26));
+ }
+
+ serviceCheckIdentifier = String.valueOf(chars);
+ setKerberosServiceCheckIdentifier(cluster, serviceCheckIdentifier);
+ }
+
+ return serviceCheckIdentifier;
+ }
+
+ /**
+ * Stores the Kerberos service check identifier value into the session data from the
+ * relevant Cluster object.
+ * <p/>
+ * This value is intended to be used by the KerberosHelper to manage uniquely created
+ * principals for use in service checks.
+ *
+ * @param cluster the relevant Cluster
+ * @param value the Kerberos service check identifier to store or null to clear any previously set value
+ */
+ private void setKerberosServiceCheckIdentifier(Cluster cluster, String value) {
+ Map<String, Object> sessionAttributes = cluster.getSessionAttributes();
+
+ if (sessionAttributes != null) {
+ if (value == null) {
+ sessionAttributes.remove(SERVICE_CHECK_IDENTIFIER);
+ } else {
+ sessionAttributes.put(SERVICE_CHECK_IDENTIFIER, value);
+ }
+ }
+ }
+
+ /**
* Given a Collection of ServiceComponentHosts generates a unique list of hosts.
*
* @param serviceComponentHosts a Collection of ServiceComponentHosts from which to to retrieve host names
@@ -1707,17 +1998,6 @@ public class KerberosHelper {
Set<String> hostsWithValidKerberosClient)
throws AmbariException {
- Iterator<ServiceComponentHost> iterator = new HashSet<ServiceComponentHost>(serviceComponentHosts).iterator();
- //Filter out ServiceComponentHosts not ready for processing from serviceComponentHostsToProcess
- // by pruning off the ones that on hosts that are not in hostsWithValidKerberosClient
- while(iterator.hasNext()) {
- ServiceComponentHost sch = iterator.next();
-
- if(!hostsWithValidKerberosClient.contains(sch.getHostName())) {
- iterator.remove();
- }
- }
-
Stage stage = createNewStage(requestStageContainer.getLastStageId(),
cluster,
requestStageContainer.getId(),
@@ -1726,8 +2006,11 @@ public class KerberosHelper {
StageUtils.getGson().toJson(commandParameters),
hostParamsJson);
- if (!serviceComponentHosts.isEmpty()) {
- List<String> hostsToUpdate = createUniqueHostList(serviceComponentHosts, Collections.singleton(HostState.HEALTHY));
+ Collection<ServiceComponentHost> filteredComponents = filterServiceComponentHostsForHosts(
+ new ArrayList<ServiceComponentHost>(serviceComponentHosts), hostsWithValidKerberosClient);
+
+ if (!filteredComponents.isEmpty()) {
+ List<String> hostsToUpdate = createUniqueHostList(filteredComponents, Collections.singleton(HostState.HEALTHY));
Map<String, String> requestParams = new HashMap<String, String>();
List<RequestResourceFilter> requestResourceFilters = new ArrayList<RequestResourceFilter>();
RequestResourceFilter reqResFilter = new RequestResourceFilter(Service.Type.KERBEROS.name(), Role.KERBEROS_CLIENT.name(), hostsToUpdate);
@@ -1746,12 +2029,40 @@ public class KerberosHelper {
requestStageContainer.addStages(roleGraph.getStages());
}
+ /**
+ * Filter out ServiceComponentHosts that are not on hosts in the specified set of host names.
+ * <p/>
+ * It is expected that the supplied collection is modifiable. It will be modified in place.
+ *
+ * @param serviceComponentHosts a collection of ServiceComponentHost items to test
+ * @param hosts a set of host names indicating valid hosts
+ * @return a collection of filtered ServiceComponentHost items
+ */
+ private Collection<ServiceComponentHost> filterServiceComponentHostsForHosts(Collection<ServiceComponentHost> serviceComponentHosts,
+ Set<String> hosts) {
+
+ if ((serviceComponentHosts != null) && (hosts != null)) {
+ Iterator<ServiceComponentHost> iterator = serviceComponentHosts.iterator();
+ while (iterator.hasNext()) {
+ ServiceComponentHost sch = iterator.next();
+
+ if (!hosts.contains(sch.getHostName())) {
+ iterator.remove();
+ }
+ }
+ }
+
+ return serviceComponentHosts;
+ }
+
public void addDeleteKeytabFilesStage(Cluster cluster, List<ServiceComponentHost> serviceComponentHosts,
- String clusterHostInfoJson, String hostParamsJson,
- Map<String, String> commandParameters,
- RoleCommandOrder roleCommandOrder,
- RequestStageContainer requestStageContainer)
+ String clusterHostInfoJson, String hostParamsJson,
+ Map<String, String> commandParameters,
+ RoleCommandOrder roleCommandOrder,
+ RequestStageContainer requestStageContainer,
+ Set<String> hostsWithValidKerberosClient)
throws AmbariException {
+
Stage stage = createNewStage(requestStageContainer.getLastStageId(),
cluster,
requestStageContainer.getId(),
@@ -1760,8 +2071,11 @@ public class KerberosHelper {
StageUtils.getGson().toJson(commandParameters),
hostParamsJson);
- if (!serviceComponentHosts.isEmpty()) {
- List<String> hostsToUpdate = createUniqueHostList(serviceComponentHosts, Collections.singleton(HostState.HEALTHY));
+ Collection<ServiceComponentHost> filteredComponents = filterServiceComponentHostsForHosts(
+ new ArrayList<ServiceComponentHost>(serviceComponentHosts), hostsWithValidKerberosClient);
+
+ if (!filteredComponents.isEmpty()) {
+ List<String> hostsToUpdate = createUniqueHostList(filteredComponents, Collections.singleton(HostState.HEALTHY));
Map<String, String> requestParams = new HashMap<String, String>();
List<RequestResourceFilter> requestResourceFilters = new ArrayList<RequestResourceFilter>();
RequestResourceFilter reqResFilter = new RequestResourceFilter("KERBEROS", "KERBEROS_CLIENT", hostsToUpdate);
@@ -1802,6 +2116,34 @@ public class KerberosHelper {
roleGraph.build(stage);
requestStageContainer.addStages(roleGraph.getStages());
}
+
+ public void addFinalizeOperationStage(Cluster cluster, String clusterHostInfoJson,
+ String hostParamsJson, ServiceComponentHostServerActionEvent event,
+ File dataDirectory,
+ RoleCommandOrder roleCommandOrder, RequestStageContainer requestStageContainer)
+ throws AmbariException {
+
+ // Add the cleanup stage...
+ Map<String, String> commandParameters = new HashMap<String, String>();
+ commandParameters.put(KerberosServerAction.AUTHENTICATED_USER_NAME, ambariManagementController.getAuthName());
+ commandParameters.put(KerberosServerAction.DATA_DIRECTORY, dataDirectory.getAbsolutePath());
+
+ Stage stage = createServerActionStage(requestStageContainer.getLastStageId(),
+ cluster,
+ requestStageContainer.getId(),
+ "Finalize Operations",
+ clusterHostInfoJson,
+ "{}",
+ hostParamsJson,
+ FinalizeKerberosServerAction.class,
+ event,
+ commandParameters,
+ "Finalize Operations", 300);
+
+ RoleGraph roleGraph = new RoleGraph(roleCommandOrder);
+ roleGraph.build(stage);
+ requestStageContainer.addStages(roleGraph.getStages());
+ }
}
/**
@@ -2082,7 +2424,7 @@ public class KerberosHelper {
// *****************************************************************
// Create stage to delete keytabs
addDeleteKeytabFilesStage(cluster, serviceComponentHosts, clusterHostInfoJson,
- hostParamsJson, commandParameters, roleCommandOrder, requestStageContainer);
+ hostParamsJson, commandParameters, roleCommandOrder, requestStageContainer, hostsWithValidKerberosClient);
return requestStageContainer.getLastStageId();
}
@@ -2199,7 +2541,6 @@ public class KerberosHelper {
* <li>remove keytab files</li>
* </ol>
*/
-
private class DeletePrincipalsAndKeytabsHandler extends Handler {
@Override
@@ -2260,14 +2601,12 @@ public class KerberosHelper {
// *****************************************************************
// Create stage to delete keytabs
addDeleteKeytabFilesStage(cluster, serviceComponentHosts, clusterHostInfoJson,
- hostParamsJson, commandParameters, roleCommandOrder, requestStageContainer);
+ hostParamsJson, commandParameters, roleCommandOrder, requestStageContainer, hostsWithValidKerberosClient);
return requestStageContainer.getLastStageId();
}
}
-
-
/**
* KerberosDetails is a helper class to hold the details of the relevant Kerberos-specific
* configurations so they may be passed around more easily.
@@ -2310,5 +2649,4 @@ public class KerberosHelper {
return securityType;
}
}
-
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/954b96e1/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/params.py
index 3705cfe..3ccbc3e 100644
--- a/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/params.py
@@ -40,6 +40,7 @@ kadm5_acl_file = 'kadm5.acl'
kadm5_acl_path = kadm5_acl_dir + '/' + kadm5_acl_file
config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
command_params = None
configurations = None
@@ -68,6 +69,8 @@ if config is not None:
command_params = get_property_value(config, 'commandParams')
if command_params is not None:
keytab_details = get_unstructured_data(command_params, 'keytab')
+ smoke_test_principal = get_property_value(command_params, 'principal_name', None, True, None)
+ smoke_test_keytab_file = get_property_value(command_params, 'keytab_file', None, True, None)
kerberos_command_params = get_property_value(config, 'kerberosCommandParams')
@@ -76,8 +79,10 @@ if config is not None:
cluster_env = get_property_value(configurations, 'cluster-env')
if cluster_env is not None:
- smoke_test_principal = get_property_value(cluster_env, 'smokeuser_principal_name', None, True, None)
- smoke_test_keytab_file = get_property_value(cluster_env, 'smokeuser_keytab', None, True, None)
+ if smoke_test_principal is None:
+ smoke_test_principal = get_property_value(cluster_env, 'smokeuser_principal_name', None, True, None)
+ if smoke_test_keytab_file is None:
+ smoke_test_keytab_file = get_property_value(cluster_env, 'smokeuser_keytab', None, True, None)
default_group = get_property_value(cluster_env, 'user_group')
http://git-wip-us.apache.org/repos/asf/ambari/blob/954b96e1/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/service_check.py
index ee4a4c3..53ab52f 100644
--- a/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/service_check.py
@@ -22,36 +22,40 @@ Ambari Agent
from kerberos_common import *
from resource_management import *
+# hashlib is supplied as of Python 2.5 as the replacement interface for md5
+# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
+# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
+# preserving 2.4 compatibility.
+try:
+ import hashlib
+ _md5 = hashlib.md5
+except ImportError:
+ import md5
+ _md5 = md5.new
+
class KerberosServiceCheck(KerberosScript):
def service_check(self, env):
import params
- if params.smoke_test_principal is None:
- params.smoke_test_principal = params.smoke_user
-
- # First attempt to test using the smoke test user, if data is available
if ((params.smoke_test_principal is not None) and
(params.smoke_test_keytab_file is not None) and
os.path.isfile(params.smoke_test_keytab_file)):
- print "Performing kinit using smoke test user: %s" % params.smoke_test_principal
- code, out = self.test_kinit({
- 'principal': params.smoke_test_principal,
- 'keytab_file': params.smoke_test_keytab_file
- }, user=params.smoke_user)
- test_performed = True
+ print "Performing kinit using %s" % params.smoke_test_principal
+ ccache_file_name = _md5("{0}|{1}".format(params.smoke_test_principal,params.smoke_test_keytab_file)).hexdigest()
+ ccache_file_path = "{0}{1}kerberos_service_check_cc_{2}".format(params.tmp_dir, os.sep, ccache_file_name)
+
+ kinit_path_local = functions.get_kinit_path()
+ kinit_command = "{0} -c {1} -kt {2} {3}".format(kinit_path_local, ccache_file_path, params.smoke_test_keytab_file, params.smoke_test_principal)
+
+ try:
+ # kinit
+ Execute(kinit_command)
+ finally:
+ os.remove(ccache_file_path)
else:
- code = -1
- out = 'No principal or keytab found'
- test_performed = False
-
- if test_performed:
- if code == 0:
- print "Test executed successfully."
- else:
- print "Test failed with error code %d: %s." % (code, out)
- else:
- print "Test not performed - no test principal was available"
+ err_msg = Logger.filter_text("Failed to execute kinit test due to principal or keytab not found or available")
+ raise Fail(err_msg)
if __name__ == "__main__":
KerberosServiceCheck().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/954b96e1/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
index e16f22f..8e1c0e8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
@@ -358,6 +358,16 @@ public class KerberosHelperTest extends EasyMockSupport {
testDisableKerberos(new KerberosCredential("principal", "password", "keytab"), false, true);
}
+ @Test
+ public void testCreateTestIdentity() throws Exception {
+ testCreateTestIdentity(new KerberosCredential("principal", "password", "keytab"));
+ }
+
+ @Test
+ public void testDeleteTestIdentity() throws Exception {
+ testDeleteTestIdentity(new KerberosCredential("principal", "password", "keytab"));
+ }
+
private void testEnableKerberos(final KerberosCredential kerberosCredential,
boolean getClusterDescriptor,
boolean getStackDescriptor) throws Exception {
@@ -1835,4 +1845,387 @@ public class KerberosHelperTest extends EasyMockSupport {
verifyAll();
}
+
+ private void testCreateTestIdentity(final KerberosCredential kerberosCredential) throws Exception {
+ KerberosHelper kerberosHelper = injector.getInstance(KerberosHelper.class);
+
+ final ServiceComponentHost schKerberosClient = createMock(ServiceComponentHost.class);
+ expect(schKerberosClient.getServiceName()).andReturn(Service.Type.KERBEROS.name()).anyTimes();
+ expect(schKerberosClient.getServiceComponentName()).andReturn(Role.KERBEROS_CLIENT.name()).anyTimes();
+ expect(schKerberosClient.getHostName()).andReturn("host1").anyTimes();
+ expect(schKerberosClient.getState()).andReturn(State.INSTALLED).anyTimes();
+
+ final ServiceComponentHost sch1 = createMock(ServiceComponentHost.class);
+ expect(sch1.getServiceName()).andReturn("SERVICE1").anyTimes();
+ expect(sch1.getServiceComponentName()).andReturn("COMPONENT1").anyTimes();
+ expect(sch1.getHostName()).andReturn("host1").anyTimes();
+
+ final ServiceComponentHost sch2 = createStrictMock(ServiceComponentHost.class);
+ expect(sch2.getServiceName()).andReturn("SERVICE2").anyTimes();
+ expect(sch2.getServiceComponentName()).andReturn("COMPONENT3").anyTimes();
+
+ final ServiceComponentHost sch3 = createStrictMock(ServiceComponentHost.class);
+ expect(sch3.getServiceName()).andReturn("SERVICE3").anyTimes();
+ expect(sch3.getServiceComponentName()).andReturn("COMPONENT3").anyTimes();
+ expect(sch3.getHostName()).andReturn("host1").anyTimes();
+
+ final Host host = createNiceMock(Host.class);
+ expect(host.getHostName()).andReturn("host1").anyTimes();
+ expect(host.getState()).andReturn(HostState.HEALTHY).anyTimes();
+
+ final ServiceComponent serviceComponentKerberosClient = createNiceMock(ServiceComponent.class);
+ expect(serviceComponentKerberosClient.getName()).andReturn(Role.KERBEROS_CLIENT.name()).anyTimes();
+ expect(serviceComponentKerberosClient.getServiceComponentHosts()).andReturn(Collections.singletonMap("host1", schKerberosClient)).anyTimes();
+
+ final Service serviceKerberos = createStrictMock(Service.class);
+ expect(serviceKerberos.getName()).andReturn(Service.Type.KERBEROS.name()).anyTimes();
+ expect(serviceKerberos.getServiceComponents())
+ .andReturn(Collections.singletonMap(Role.KERBEROS_CLIENT.name(), serviceComponentKerberosClient))
+ .times(2);
+
+ final Service service1 = createStrictMock(Service.class);
+ expect(service1.getName()).andReturn("SERVICE1").anyTimes();
+ expect(service1.getServiceComponents())
+ .andReturn(Collections.<String, ServiceComponent>emptyMap())
+ .times(2);
+
+ final Service service2 = createStrictMock(Service.class);
+ expect(service2.getName()).andReturn("SERVICE2").anyTimes();
+ expect(service2.getServiceComponents())
+ .andReturn(Collections.<String, ServiceComponent>emptyMap())
+ .times(2);
+
+ final Map<String, String> kerberosEnvProperties = createNiceMock(Map.class);
+ expect(kerberosEnvProperties.get("kdc_type")).andReturn("mit-kdc").anyTimes();
+ expect(kerberosEnvProperties.get("realm")).andReturn("FOOBAR.COM").anyTimes();
+
+ final Config kerberosEnvConfig = createNiceMock(Config.class);
+ expect(kerberosEnvConfig.getProperties()).andReturn(kerberosEnvProperties).anyTimes();
+
+ final Map<String, String> krb5ConfProperties = createNiceMock(Map.class);
+
+ final Config krb5ConfConfig = createNiceMock(Config.class);
+ expect(krb5ConfConfig.getProperties()).andReturn(krb5ConfProperties).anyTimes();
+
+ final Cluster cluster = createNiceMock(Cluster.class);
+ expect(cluster.getDesiredConfigByType("krb5-conf")).andReturn(krb5ConfConfig).anyTimes();
+ expect(cluster.getDesiredConfigByType("kerberos-env")).andReturn(kerberosEnvConfig).anyTimes();
+ expect(cluster.getClusterName()).andReturn("c1").anyTimes();
+ expect(cluster.getServices())
+ .andReturn(new HashMap<String, Service>() {
+ {
+ put(Service.Type.KERBEROS.name(), serviceKerberos);
+ put("SERVICE1", service1);
+ put("SERVICE2", service2);
+ }
+ })
+ .anyTimes();
+ expect(cluster.getServiceComponentHosts("host1"))
+ .andReturn(new ArrayList<ServiceComponentHost>() {
+ {
+ add(sch1);
+ add(sch2);
+ add(sch3);
+ add(schKerberosClient);
+ }
+ })
+ .once();
+ expect(cluster.getCurrentStackVersion())
+ .andReturn(new StackId("HDP", "2.2"))
+ .anyTimes();
+ expect(cluster.getSessionAttributes()).andReturn(new HashMap<String, Object>() {{
+ if (kerberosCredential != null) {
+ put("kerberos_admin/" + KerberosCredential.KEY_NAME_PRINCIPAL, kerberosCredential.getPrincipal());
+ put("kerberos_admin/" + KerberosCredential.KEY_NAME_PASSWORD, kerberosCredential.getPassword());
+ put("kerberos_admin/" + KerberosCredential.KEY_NAME_KEYTAB, kerberosCredential.getKeytab());
+ }
+ }}).anyTimes();
+
+ final Clusters clusters = injector.getInstance(Clusters.class);
+ expect(clusters.getHostsForCluster("c1"))
+ .andReturn(new HashMap<String, Host>() {
+ {
+ put("host1", host);
+ }
+ })
+ .once();
+ expect(clusters.getHost("host1"))
+ .andReturn(host)
+ .once();
+
+ final AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+ expect(ambariManagementController.findConfigurationTagsWithOverrides(cluster, "host1"))
+ .andReturn(Collections.<String, Map<String, String>>emptyMap())
+ .once();
+ expect(ambariManagementController.findConfigurationTagsWithOverrides(cluster, null))
+ .andReturn(Collections.<String, Map<String, String>>emptyMap())
+ .once();
+ expect(ambariManagementController.getRoleCommandOrder(cluster))
+ .andReturn(createNiceMock(RoleCommandOrder.class))
+ .once();
+
+ final ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
+ expect(configHelper.getEffectiveConfigProperties(anyObject(Cluster.class), anyObject(Map.class)))
+ .andReturn(new HashMap<String, Map<String, String>>() {
+ {
+ put("cluster-env", new HashMap<String, String>() {{
+ put("kerberos_domain", "FOOBAR.COM");
+ }});
+ }
+ })
+ .times(1);
+
+ final KerberosDescriptor kerberosDescriptor = createStrictMock(KerberosDescriptor.class);
+ expect(kerberosDescriptor.getProperties()).andReturn(null).once();
+
+ setupGetDescriptorFromCluster(kerberosDescriptor);
+
+ final StageFactory stageFactory = injector.getInstance(StageFactory.class);
+ expect(stageFactory.createNew(anyLong(), anyObject(String.class), anyObject(String.class),
+ anyLong(), anyObject(String.class), anyObject(String.class), anyObject(String.class),
+ anyObject(String.class)))
+ .andAnswer(new IAnswer<Stage>() {
+ @Override
+ public Stage answer() throws Throwable {
+ Stage stage = createNiceMock(Stage.class);
+
+ expect(stage.getHostRoleCommands())
+ .andReturn(Collections.<String, Map<String, HostRoleCommand>>emptyMap())
+ .anyTimes();
+ replay(stage);
+ return stage;
+ }
+ })
+ .anyTimes();
+
+ // This is a STRICT mock to help ensure that the end result is what we want.
+ final RequestStageContainer requestStageContainer = createStrictMock(RequestStageContainer.class);
+ // Create Principals Stage
+ expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
+ expect(requestStageContainer.getId()).andReturn(1L).once();
+ requestStageContainer.addStages(anyObject(List.class));
+ expectLastCall().once();
+ // Create Keytabs Stage
+ expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
+ expect(requestStageContainer.getId()).andReturn(1L).once();
+ requestStageContainer.addStages(anyObject(List.class));
+ expectLastCall().once();
+ // Distribute Keytabs Stage
+ expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
+ expect(requestStageContainer.getId()).andReturn(1L).once();
+ requestStageContainer.addStages(anyObject(List.class));
+ expectLastCall().once();
+ // Clean-up/Finalize Stage
+ expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
+ expect(requestStageContainer.getId()).andReturn(1L).once();
+ requestStageContainer.addStages(anyObject(List.class));
+ expectLastCall().once();
+
+ replayAll();
+
+ // Needed by infrastructure
+ injector.getInstance(AmbariMetaInfo.class).init();
+
+ Map<String, String> commandParamsStage = new HashMap<String, String>();
+ kerberosHelper.createTestIdentity(cluster, commandParamsStage, requestStageContainer);
+
+ verifyAll();
+
+ String serviceCheckID = (String)cluster.getSessionAttributes().get("_kerberos_internal_service_check_identifier");
+ Assert.assertNotNull(serviceCheckID);
+
+ Assert.assertTrue(commandParamsStage.containsKey("principal_name"));
+ Assert.assertEquals("${cluster-env/smokeuser}_" + serviceCheckID + "@${realm}", commandParamsStage.get("principal_name"));
+
+ Assert.assertTrue(commandParamsStage.containsKey("keytab_file"));
+ Assert.assertEquals("${keytab_dir}/kerberos.service_check." + serviceCheckID + ".keytab", commandParamsStage.get("keytab_file"));
+ }
+
+ private void testDeleteTestIdentity(final KerberosCredential kerberosCredential) throws Exception {
+ KerberosHelper kerberosHelper = injector.getInstance(KerberosHelper.class);
+
+ final ServiceComponentHost schKerberosClient = createMock(ServiceComponentHost.class);
+ expect(schKerberosClient.getServiceName()).andReturn(Service.Type.KERBEROS.name()).anyTimes();
+ expect(schKerberosClient.getServiceComponentName()).andReturn(Role.KERBEROS_CLIENT.name()).anyTimes();
+ expect(schKerberosClient.getHostName()).andReturn("host1").anyTimes();
+ expect(schKerberosClient.getState()).andReturn(State.INSTALLED).anyTimes();
+
+ final ServiceComponentHost sch1 = createMock(ServiceComponentHost.class);
+ expect(sch1.getServiceName()).andReturn("SERVICE1").anyTimes();
+ expect(sch1.getServiceComponentName()).andReturn("COMPONENT1").anyTimes();
+ expect(sch1.getHostName()).andReturn("host1").anyTimes();
+
+ final ServiceComponentHost sch2 = createStrictMock(ServiceComponentHost.class);
+ expect(sch2.getServiceName()).andReturn("SERVICE2").anyTimes();
+ expect(sch2.getServiceComponentName()).andReturn("COMPONENT3").anyTimes();
+
+ final ServiceComponentHost sch3 = createStrictMock(ServiceComponentHost.class);
+ expect(sch3.getServiceName()).andReturn("SERVICE3").anyTimes();
+ expect(sch3.getServiceComponentName()).andReturn("COMPONENT3").anyTimes();
+ expect(sch3.getHostName()).andReturn("host1").anyTimes();
+
+ final Host host = createNiceMock(Host.class);
+ expect(host.getHostName()).andReturn("host1").anyTimes();
+ expect(host.getState()).andReturn(HostState.HEALTHY).anyTimes();
+
+ final ServiceComponent serviceComponentKerberosClient = createNiceMock(ServiceComponent.class);
+ expect(serviceComponentKerberosClient.getName()).andReturn(Role.KERBEROS_CLIENT.name()).anyTimes();
+ expect(serviceComponentKerberosClient.getServiceComponentHosts()).andReturn(Collections.singletonMap("host1", schKerberosClient)).anyTimes();
+
+ final Service serviceKerberos = createStrictMock(Service.class);
+ expect(serviceKerberos.getName()).andReturn(Service.Type.KERBEROS.name()).anyTimes();
+ expect(serviceKerberos.getServiceComponents())
+ .andReturn(Collections.singletonMap(Role.KERBEROS_CLIENT.name(), serviceComponentKerberosClient))
+ .times(2);
+
+ final Service service1 = createStrictMock(Service.class);
+ expect(service1.getName()).andReturn("SERVICE1").anyTimes();
+ expect(service1.getServiceComponents())
+ .andReturn(Collections.<String, ServiceComponent>emptyMap())
+ .times(2);
+
+ final Service service2 = createStrictMock(Service.class);
+ expect(service2.getName()).andReturn("SERVICE2").anyTimes();
+ expect(service2.getServiceComponents())
+ .andReturn(Collections.<String, ServiceComponent>emptyMap())
+ .times(2);
+
+ final Map<String, String> kerberosEnvProperties = createNiceMock(Map.class);
+ expect(kerberosEnvProperties.get("kdc_type")).andReturn("mit-kdc").anyTimes();
+ expect(kerberosEnvProperties.get("realm")).andReturn("FOOBAR.COM").anyTimes();
+
+ final Config kerberosEnvConfig = createNiceMock(Config.class);
+ expect(kerberosEnvConfig.getProperties()).andReturn(kerberosEnvProperties).anyTimes();
+
+ final Map<String, String> krb5ConfProperties = createNiceMock(Map.class);
+
+ final Config krb5ConfConfig = createNiceMock(Config.class);
+ expect(krb5ConfConfig.getProperties()).andReturn(krb5ConfProperties).anyTimes();
+
+ final Cluster cluster = createNiceMock(Cluster.class);
+ expect(cluster.getDesiredConfigByType("krb5-conf")).andReturn(krb5ConfConfig).anyTimes();
+ expect(cluster.getDesiredConfigByType("kerberos-env")).andReturn(kerberosEnvConfig).anyTimes();
+ expect(cluster.getClusterName()).andReturn("c1").anyTimes();
+ expect(cluster.getServices())
+ .andReturn(new HashMap<String, Service>() {
+ {
+ put(Service.Type.KERBEROS.name(), serviceKerberos);
+ put("SERVICE1", service1);
+ put("SERVICE2", service2);
+ }
+ })
+ .anyTimes();
+ expect(cluster.getServiceComponentHosts("host1"))
+ .andReturn(new ArrayList<ServiceComponentHost>() {
+ {
+ add(sch1);
+ add(sch2);
+ add(sch3);
+ add(schKerberosClient);
+ }
+ })
+ .once();
+ expect(cluster.getCurrentStackVersion())
+ .andReturn(new StackId("HDP", "2.2"))
+ .anyTimes();
+ expect(cluster.getSessionAttributes()).andReturn(new HashMap<String, Object>() {{
+ if (kerberosCredential != null) {
+ put("kerberos_admin/" + KerberosCredential.KEY_NAME_PRINCIPAL, kerberosCredential.getPrincipal());
+ put("kerberos_admin/" + KerberosCredential.KEY_NAME_PASSWORD, kerberosCredential.getPassword());
+ put("kerberos_admin/" + KerberosCredential.KEY_NAME_KEYTAB, kerberosCredential.getKeytab());
+ }
+ }}).anyTimes();
+
+ final Clusters clusters = injector.getInstance(Clusters.class);
+ expect(clusters.getHostsForCluster("c1"))
+ .andReturn(new HashMap<String, Host>() {
+ {
+ put("host1", host);
+ }
+ })
+ .once();
+ expect(clusters.getHost("host1"))
+ .andReturn(host)
+ .once();
+
+ final AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+ expect(ambariManagementController.findConfigurationTagsWithOverrides(cluster, "host1"))
+ .andReturn(Collections.<String, Map<String, String>>emptyMap())
+ .once();
+ expect(ambariManagementController.findConfigurationTagsWithOverrides(cluster, null))
+ .andReturn(Collections.<String, Map<String, String>>emptyMap())
+ .once();
+ expect(ambariManagementController.getRoleCommandOrder(cluster))
+ .andReturn(createNiceMock(RoleCommandOrder.class))
+ .once();
+
+ final ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
+ expect(configHelper.getEffectiveConfigProperties(anyObject(Cluster.class), anyObject(Map.class)))
+ .andReturn(new HashMap<String, Map<String, String>>() {
+ {
+ put("cluster-env", new HashMap<String, String>() {{
+ put("kerberos_domain", "FOOBAR.COM");
+ }});
+ }
+ })
+ .times(1);
+
+ final KerberosDescriptor kerberosDescriptor = createStrictMock(KerberosDescriptor.class);
+ expect(kerberosDescriptor.getProperties()).andReturn(null).once();
+
+ setupGetDescriptorFromCluster(kerberosDescriptor);
+
+ final StageFactory stageFactory = injector.getInstance(StageFactory.class);
+ expect(stageFactory.createNew(anyLong(), anyObject(String.class), anyObject(String.class),
+ anyLong(), anyObject(String.class), anyObject(String.class), anyObject(String.class),
+ anyObject(String.class)))
+ .andAnswer(new IAnswer<Stage>() {
+ @Override
+ public Stage answer() throws Throwable {
+ Stage stage = createNiceMock(Stage.class);
+
+ expect(stage.getHostRoleCommands())
+ .andReturn(Collections.<String, Map<String, HostRoleCommand>>emptyMap())
+ .anyTimes();
+ replay(stage);
+ return stage;
+ }
+ })
+ .anyTimes();
+
+ // This is a STRICT mock to help ensure that the end result is what we want.
+ final RequestStageContainer requestStageContainer = createStrictMock(RequestStageContainer.class);
+ // Delete Principals Stage
+ expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
+ expect(requestStageContainer.getId()).andReturn(1L).once();
+ requestStageContainer.addStages(anyObject(List.class));
+ expectLastCall().once();
+ // Delete Keytabs Stage
+ expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
+ expect(requestStageContainer.getId()).andReturn(1L).once();
+ requestStageContainer.addStages(anyObject(List.class));
+ expectLastCall().once();
+ // Clean-up/Finalize Stage
+ expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
+ expect(requestStageContainer.getId()).andReturn(1L).once();
+ requestStageContainer.addStages(anyObject(List.class));
+ expectLastCall().once();
+
+ replayAll();
+
+ // Needed by infrastructure
+ injector.getInstance(AmbariMetaInfo.class).init();
+
+ String serviceCheckID = "some_random_value";
+
+ cluster.getSessionAttributes().put("_kerberos_internal_service_check_identifier", serviceCheckID);
+
+ Map<String, String> commandParamsStage = new HashMap<String, String>();
+ commandParamsStage.put("principal_name", "${cluster-env/smokeuser}_" + serviceCheckID + "@${realm}");
+ commandParamsStage.put("keytab_file", "${keytab_dir}/kerberos.service_check." + serviceCheckID + ".keytab");
+
+ kerberosHelper.deleteTestIdentity(cluster, commandParamsStage, requestStageContainer);
+
+ verifyAll();
+ }
}