Posted to commits@ambari.apache.org by lp...@apache.org on 2017/09/05 09:41:25 UTC

[30/50] [abbrv] ambari git commit: AMBARI-21809. Pre-configure services during stack upgrade if Kerberos is enabled to reduce number of core service restarts when services are added (rlevas)
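
The PreconfigureKerberosAction class itself is not part of this hunk; only its unit test and test fixtures follow. As a reading aid for the two "skip" tests below, the guard they verify amounts to roughly the following sketch (the method name and parameters are illustrative assumptions, not the actual implementation):

    // Illustrative sketch only -- the real PreconfigureKerberosAction source is not shown in this diff.
    // The tests expect the action to be a no-op on downgrades, and to stop after looking up the
    // cluster when that cluster is not Kerberized.
    private boolean shouldSkip(Map<String, String> commandParams, Cluster cluster) {
      if (Direction.DOWNGRADE.name().equalsIgnoreCase(commandParams.get(UPGRADE_DIRECTION_KEY))) {
        return true;                                                // see testSkipWhenDowngrade
      }
      return cluster.getSecurityType() != SecurityType.KERBEROS;    // see testSkipWhenNotKerberos
    }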

http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosActionTest.java
new file mode 100644
index 0000000..a7bf33c
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosActionTest.java
@@ -0,0 +1,596 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.serveraction.upgrades;
+
+import static org.apache.ambari.server.serveraction.upgrades.PreconfigureKerberosAction.UPGRADE_DIRECTION_KEY;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.anyString;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.getCurrentArguments;
+import static org.easymock.EasyMock.newCapture;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import javax.persistence.EntityManager;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
+import org.apache.ambari.server.actionmanager.HostRoleCommandFactoryImpl;
+import org.apache.ambari.server.actionmanager.RequestFactory;
+import org.apache.ambari.server.actionmanager.StageFactory;
+import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.api.services.stackadvisor.StackAdvisorHelper;
+import org.apache.ambari.server.api.services.stackadvisor.StackAdvisorRequest;
+import org.apache.ambari.server.api.services.stackadvisor.recommendations.RecommendationResponse;
+import org.apache.ambari.server.audit.AuditLogger;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.controller.KerberosHelperImpl;
+import org.apache.ambari.server.metadata.CachedRoleCommandOrderProvider;
+import org.apache.ambari.server.metadata.RoleCommandOrderProvider;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.ArtifactDAO;
+import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
+import org.apache.ambari.server.orm.dao.KerberosPrincipalDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.security.encryption.CredentialStoreService;
+import org.apache.ambari.server.stack.StackManagerFactory;
+import org.apache.ambari.server.stageplanner.RoleGraphFactory;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostComponentAdminState;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
+import org.apache.ambari.server.state.UpgradeContextFactory;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
+import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.ambari.server.topology.PersistedState;
+import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory;
+import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.collections.MapUtils;
+import org.easymock.Capture;
+import org.easymock.EasyMockSupport;
+import org.easymock.IAnswer;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.gson.Gson;
+import com.google.gson.reflect.TypeToken;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+
+public class PreconfigureKerberosActionTest extends EasyMockSupport {
+
+  private static final String CLUSTER_NAME = "c1";
+
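+  /**
+   * The action should exit without touching the cluster when the command parameters report
+   * the upgrade direction as DOWNGRADE; no Clusters interaction is mocked, so any cluster
+   * lookup would fail verification.
+   */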
+  @Test
+  public void testSkipWhenDowngrade() throws Exception {
+
+    Injector injector = getInjector();
+
+    Map<String, String> commandParams = getDefaultCommandParams();
+    commandParams.put(UPGRADE_DIRECTION_KEY, Direction.DOWNGRADE.name());
+
+    ExecutionCommand executionCommand = createMockExecutionCommand(commandParams);
+
+    replayAll();
+
+    injector.getInstance(AmbariMetaInfo.class).init();
+
+    PreconfigureKerberosAction action = injector.getInstance(PreconfigureKerberosAction.class);
+    ConcurrentMap<String, Object> context = new ConcurrentHashMap<>();
+    action.setExecutionCommand(executionCommand);
+    action.execute(context);
+
+    verifyAll();
+  }
+
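+  /**
+   * The action should look up the cluster but make no configuration changes when the
+   * cluster's security type is not KERBEROS.
+   */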
+  @Test
+  public void testSkipWhenNotKerberos() throws Exception {
+    Injector injector = getInjector();
+
+    ExecutionCommand executionCommand = createMockExecutionCommand(getDefaultCommandParams());
+
+    Cluster cluster = createMockCluster(SecurityType.NONE, Collections.<Host>emptyList(),
+        Collections.<String, Service>emptyMap(), Collections.<String, List<ServiceComponentHost>>emptyMap(),
+        createNiceMock(StackId.class), Collections.<String, Config>emptyMap());
+
+    Clusters clusters = injector.getInstance(Clusters.class);
+    expect(clusters.getCluster(CLUSTER_NAME)).andReturn(cluster).atLeastOnce();
+
+    replayAll();
+
+    injector.getInstance(AmbariMetaInfo.class).init();
+
+    PreconfigureKerberosAction action = injector.getInstance(PreconfigureKerberosAction.class);
+    ConcurrentMap<String, Object> context = new ConcurrentHashMap<>();
+    action.setExecutionCommand(executionCommand);
+    action.execute(context);
+
+    verifyAll();
+  }
+
+  private Host createMockHost(String hostname) {
+    Host host = createNiceMock(Host.class);
+    expect(host.getHostName()).andReturn(hostname).anyTimes();
+    return host;
+  }
+
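+  /**
+   * Exercises the preconfigure path for a Kerberized HDP 2.6 cluster running HDFS, YARN and
+   * ZOOKEEPER, and asserts that core-site is updated with the proxyuser and auth_to_local
+   * entries required by the not-yet-installed KNOX service.
+   */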
+  @Test
+  public void testUpgrade() throws Exception {
+    Capture<? extends Map<String, String>> captureCoreSiteProperties = newCapture();
+
+    Injector injector = getInjector();
+
+    ExecutionCommand executionCommand = createMockExecutionCommand(getDefaultCommandParams());
+
+    UpgradeEntity upgradeProgress = createMock(UpgradeEntity.class);
+
+    StackId targetStackId = createMock(StackId.class);
+    expect(targetStackId.getStackId()).andReturn("HDP-2.6").anyTimes();
+    expect(targetStackId.getStackName()).andReturn("HDP").anyTimes();
+    expect(targetStackId.getStackVersion()).andReturn("2.6").anyTimes();
+
+    final String hostName1 = "c6401.ambari.apache.org";
+    final String hostName2 = "c6402.ambari.apache.org";
+    final String hostName3 = "c6403.ambari.apache.org";
+
+    final Host host1 = createMockHost(hostName1);
+    Host host2 = createMockHost(hostName2);
+    Host host3 = createMockHost(hostName3);
+    Map<String, Host> hosts = new HashMap<>();
+    hosts.put(hostName1, host1);
+    hosts.put(hostName2, host2);
+    hosts.put(hostName3, host3);
+
+    Map<String, ServiceComponentHost> nnSchs = Collections.singletonMap(hostName1, createMockServiceComponentHost("HDFS", "NAMENODE", hostName1, host1));
+    Map<String, ServiceComponentHost> rmSchs = Collections.singletonMap(hostName2, createMockServiceComponentHost("YARN", "RESOURCEMANAGER", hostName2, host2));
+    Map<String, ServiceComponentHost> nmSchs = Collections.singletonMap(hostName2, createMockServiceComponentHost("YARN", "NODEMANAGER", hostName2, host2));
+    Map<String, ServiceComponentHost> dnSchs = new HashMap<>();
+    final Map<String, ServiceComponentHost> hcSchs = new HashMap<>();
+    Map<String, ServiceComponentHost> zkSSchs = new HashMap<>();
+    Map<String, ServiceComponentHost> zkCSchs = new HashMap<>();
+    Map<String, List<ServiceComponentHost>> serviceComponentHosts = new HashMap<>();
+
+    for (Map.Entry<String, Host> entry : hosts.entrySet()) {
+      String hostname = entry.getKey();
+      List<ServiceComponentHost> list = new ArrayList<>();
+      ServiceComponentHost sch;
+
+      sch = createMockServiceComponentHost("HDFS", "DATANODE", hostname, entry.getValue());
+      dnSchs.put(hostname, sch);
+      list.add(sch);
+
+      sch = createMockServiceComponentHost("HDFS", "HDFS_CLIENT", hostname, entry.getValue());
+      hcSchs.put(hostname, sch);
+      list.add(sch);
+
+      sch = createMockServiceComponentHost("ZOOKEEPER", "ZOOKEEPER_SERVER", hostname, entry.getValue());
+      zkSSchs.put(hostname, sch);
+      list.add(sch);
+
+      sch = createMockServiceComponentHost("ZOOKEEPER", "ZOOKEEPER_CLIENT", hostname, entry.getValue());
+      zkCSchs.put(hostname, sch);
+      list.add(sch);
+
+      serviceComponentHosts.put(hostname, list);
+    }
+
+
+    Map<String, ServiceComponent> hdfsComponents = new HashMap<>();
+    hdfsComponents.put("NAMENODE", createMockServiceComponent("NAMENODE", false, nnSchs));
+    hdfsComponents.put("DATANODE", createMockServiceComponent("DATANODE", false, dnSchs));
+    hdfsComponents.put("HDFS_CLIENT", createMockServiceComponent("HDFS_CLIENT", true, hcSchs));
+
+    Map<String, ServiceComponent> yarnComponents = new HashMap<>();
+    yarnComponents.put("RESOURCEMANAGER", createMockServiceComponent("RESOURCEMANAGER", false, rmSchs));
+    yarnComponents.put("NODEMANAGER", createMockServiceComponent("NODEMANAGER", false, nmSchs));
+
+    Map<String, ServiceComponent> zkComponents = new HashMap<>();
+    zkComponents.put("ZOOKEEPER_SERVER", createMockServiceComponent("ZOOKEEPER_SERVER", false, zkSSchs));
+    zkComponents.put("ZOOKEEPER_CLIENT", createMockServiceComponent("ZOOKEEPER_CLIENT", true, zkCSchs));
+
+    Service hdfsService = createMockService("HDFS", hdfsComponents, targetStackId);
+    Service yarnService = createMockService("YARN", yarnComponents, targetStackId);
+    Service zkService = createMockService("ZOOKEEPER", zkComponents, targetStackId);
+
+    Map<String, Service> installedServices = new HashMap<>();
+    installedServices.put("HDFS", hdfsService);
+    installedServices.put("YARN", yarnService);
+    installedServices.put("ZOOKEEPER", zkService);
+
+    Map<String, Map<String, String>> clusterConfig = getClusterConfig();
+
+    Map<String, Config> clusterConfigs = new HashMap<>();
+    for (Map.Entry<String, Map<String, String>> entry : clusterConfig.entrySet()) {
+      clusterConfigs.put(entry.getKey(), createMockConfig(entry.getValue()));
+    }
+
+    Cluster cluster = createMockCluster(SecurityType.KERBEROS, hosts.values(), installedServices, serviceComponentHosts, targetStackId, clusterConfigs);
+    expect(cluster.getUpgradeInProgress()).andReturn(upgradeProgress).once();
+
+    RepositoryVersionEntity targetRepositoryVersion = createMock(RepositoryVersionEntity.class);
+    expect(targetRepositoryVersion.getStackId()).andReturn(targetStackId).atLeastOnce();
+
+    UpgradeContext upgradeContext = createMock(UpgradeContext.class);
+    expect(upgradeContext.getTargetRepositoryVersion(anyString())).andReturn(targetRepositoryVersion).atLeastOnce();
+
+    UpgradeContextFactory upgradeContextFactory = injector.getInstance(UpgradeContextFactory.class);
+    expect(upgradeContextFactory.create(cluster, upgradeProgress)).andReturn(upgradeContext).once();
+
+    createMockClusters(injector, cluster);
+
+    List<PropertyInfo> knoxProperties = Arrays.asList(
+        createMockPropertyInfo("knox-env.xml", "knox_user", "knox"),
+        createMockPropertyInfo("knox-env.xml", "knox_group", "knox"),
+        createMockPropertyInfo("knox-env.xml", "knox_principal_name", "KERBEROS_PRINCIPAL"),
+        createMockPropertyInfo("gateway-site.xml", "gateway.port", "8443"),
+        createMockPropertyInfo("gateway-site.xml", "gateway.path", "gateway")
+    );
+
+    AmbariMetaInfo ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+    expect(ambariMetaInfo.getKerberosDescriptor("HDP", "2.6", false)).
+        andReturn(getKerberosDescriptor(false)).once();
+    expect(ambariMetaInfo.getKerberosDescriptor("HDP", "2.6", true)).
+        andReturn(getKerberosDescriptor(true)).once();
+    expect(ambariMetaInfo.isValidService("HDP", "2.6", "BEACON"))
+        .andReturn(false).anyTimes();
+    expect(ambariMetaInfo.isValidService("HDP", "2.6", "KNOX"))
+        .andReturn(true).anyTimes();
+    expect(ambariMetaInfo.getService("HDP", "2.6", "KNOX"))
+        .andReturn(createMockServiceInfo("KNOX", knoxProperties, Collections.singletonList(createMockComponentInfo("KNOX_GATEWAY")))).anyTimes();
+
+    AmbariManagementController managementController = injector.getInstance(AmbariManagementController.class);
+    expect(managementController.findConfigurationTagsWithOverrides(cluster, null))
+        .andReturn(clusterConfig).once();
+    expect(managementController.getAuthName()).andReturn("admin").anyTimes();
+
+    ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
+    expect(configHelper.getEffectiveConfigProperties(cluster, clusterConfig)).andReturn(clusterConfig).anyTimes();
+    configHelper.updateConfigType(eq(cluster), eq(targetStackId), eq(managementController), eq("core-site"), capture(captureCoreSiteProperties), anyObject(Collection.class), eq("admin"), anyString());
+    expectLastCall().once();
+
+    TopologyManager topologyManager = injector.getInstance(TopologyManager.class);
+    expect(topologyManager.getPendingHostComponents()).andReturn(Collections.<String, Collection<String>>emptyMap()).anyTimes();
+
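+    // The stack advisor mock places KNOX_GATEWAY on host1 (c6401), which is what the
+    // hadoop.proxyuser.knox.hosts assertion at the end of this test relies on.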
+    StackAdvisorHelper stackAdvisorHelper = injector.getInstance(StackAdvisorHelper.class);
+    expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class)))
+        .andAnswer(new IAnswer<RecommendationResponse>() {
+          @Override
+          public RecommendationResponse answer() throws Throwable {
+            Object[] args = getCurrentArguments();
+            StackAdvisorRequest request = (StackAdvisorRequest) args[0];
+            StackAdvisorRequest.StackAdvisorRequestType requestType = request.getRequestType();
+
+            if (requestType == StackAdvisorRequest.StackAdvisorRequestType.HOST_GROUPS) {
+              RecommendationResponse.Blueprint blueprint = new RecommendationResponse.Blueprint();
+              blueprint.setHostGroups(new HashSet<>(Arrays.asList(
+                  createRecommendationHostGroup(hostName1,
+                      Arrays.asList("ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT", "HDFS_CLIENT", "DATANODE", "NAMENODE", "KNOX_GATEWAY")),
+                  createRecommendationHostGroup(hostName2,
+                      Arrays.asList("ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT", "HDFS_CLIENT", "DATANODE", "RESOURCEMANAGER", "NODEMANAGER")),
+                  createRecommendationHostGroup(hostName3,
+                      Arrays.asList("ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT", "HDFS_CLIENT", "DATANODE"))
+              )));
+              Set<RecommendationResponse.BindingHostGroup> bindingHostGroups = new HashSet<>(Arrays.asList(
+                  createBindingHostGroup(hostName1),
+                  createBindingHostGroup(hostName2),
+                  createBindingHostGroup(hostName3)
+              ));
+
+              RecommendationResponse.BlueprintClusterBinding binding = new RecommendationResponse.BlueprintClusterBinding();
+              binding.setHostGroups(bindingHostGroups);
+
+              RecommendationResponse.Recommendation recommendation = new RecommendationResponse.Recommendation();
+              recommendation.setBlueprint(blueprint);
+              recommendation.setBlueprintClusterBinding(binding);
+
+              RecommendationResponse response = new RecommendationResponse();
+              response.setRecommendations(recommendation);
+              return response;
+            } else {
+              return null;
+            }
+          }
+        })
+        .anyTimes();
+
+    replayAll();
+
+    ambariMetaInfo.init();
+    StageUtils.setTopologyManager(injector.getInstance(TopologyManager.class));
+    StageUtils.setConfiguration(injector.getInstance(Configuration.class));
+
+    PreconfigureKerberosAction action = injector.getInstance(PreconfigureKerberosAction.class);
+    ConcurrentMap<String, Object> context = new ConcurrentHashMap<>();
+    action.setExecutionCommand(executionCommand);
+    action.execute(context);
+
+    verifyAll();
+
+    Assert.assertTrue(captureCoreSiteProperties.hasCaptured());
+
+    Map<String, String> capturedProperties = captureCoreSiteProperties.getValue();
+    Assert.assertFalse(MapUtils.isEmpty(capturedProperties));
+
+
+    String expectedAuthToLocalRules = "" +
+        "RULE:[1:$1@$0](ambari-qa-c1@EXAMPLE.COM)s/.*/ambari-qa/\n" +
+        "RULE:[1:$1@$0](hdfs-c1@EXAMPLE.COM)s/.*/hdfs/\n" +
+        "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\n" +
+        "RULE:[2:$1@$0](beacon@EXAMPLE.COM)s/.*/beacon/\n" +
+        "RULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\n" +
+        "RULE:[2:$1@$0](knox@EXAMPLE.COM)s/.*/knox/\n" +
+        "RULE:[2:$1@$0](nm@EXAMPLE.COM)s/.*/${yarn-env/yarn_user}/\n" +
+        "RULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\n" +
+        "RULE:[2:$1@$0](rm@EXAMPLE.COM)s/.*/${yarn-env/yarn_user}/\n" +
+        "DEFAULT";
+
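+    // As an example of how these rules resolve: the RULE:[2:$1@$0](nm@EXAMPLE.COM) entry in
+    // expectedAuthToLocalRules maps a two-component principal such as
+    // nm/c6402.ambari.apache.org@EXAMPLE.COM to ${yarn-env/yarn_user}, the configured YARN user.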
+    Assert.assertEquals(3, capturedProperties.size());
+    Assert.assertEquals("users", capturedProperties.get("hadoop.proxyuser.knox.groups"));
+    Assert.assertEquals("c6401.ambari.apache.org", capturedProperties.get("hadoop.proxyuser.knox.hosts"));
+    Assert.assertEquals(expectedAuthToLocalRules, capturedProperties.get("hadoop.security.auth_to_local"));
+  }
+
+  private RecommendationResponse.BindingHostGroup createBindingHostGroup(String hostName) {
+    RecommendationResponse.BindingHostGroup bindingHostGroup = new RecommendationResponse.BindingHostGroup();
+    bindingHostGroup.setName(hostName);
+    bindingHostGroup.setHosts(Collections.singleton(Collections.singletonMap("fqdn", hostName)));
+    return bindingHostGroup;
+  }
+
+  private RecommendationResponse.HostGroup createRecommendationHostGroup(String hostName, List<String> components) {
+    Set<Map<String, String>> componentDetails = new HashSet<>();
+    for (String component : components) {
+      componentDetails.add(Collections.singletonMap("name", component));
+    }
+
+    RecommendationResponse.HostGroup hostGroup = new RecommendationResponse.HostGroup();
+    hostGroup.setComponents(componentDetails);
+    hostGroup.setName(hostName);
+    return hostGroup;
+  }
+
+  private ComponentInfo createMockComponentInfo(String componentName) {
+    ComponentInfo componentInfo = createMock(ComponentInfo.class);
+    expect(componentInfo.getName()).andReturn(componentName).anyTimes();
+    return componentInfo;
+  }
+
+  private PropertyInfo createMockPropertyInfo(String fileName, String propertyName, String propertyValue) {
+    PropertyInfo propertyInfo = createMock(PropertyInfo.class);
+    expect(propertyInfo.getFilename()).andReturn(fileName).anyTimes();
+    expect(propertyInfo.getName()).andReturn(propertyName).anyTimes();
+    expect(propertyInfo.getValue()).andReturn(propertyValue).anyTimes();
+    return propertyInfo;
+  }
+
+  private ServiceInfo createMockServiceInfo(String name, List<PropertyInfo> properties, List<ComponentInfo> components) {
+    ServiceInfo serviceInfo = createMock(ServiceInfo.class);
+    expect(serviceInfo.getName()).andReturn(name).anyTimes();
+    expect(serviceInfo.getProperties()).andReturn(properties).anyTimes();
+    expect(serviceInfo.getComponents()).andReturn(components).anyTimes();
+    return serviceInfo;
+  }
+
+  private Map<String, Map<String, String>> getClusterConfig() throws URISyntaxException, FileNotFoundException {
+    URL url = ClassLoader.getSystemResource("PreconfigureActionTest_cluster_config.json");
+    return new Gson().fromJson(new FileReader(new File(url.toURI())),
+        new TypeToken<Map<String, Map<String, String>>>() {
+        }.getType());
+  }
+
+  private KerberosDescriptor getKerberosDescriptor(boolean includePreconfigureData) throws URISyntaxException, IOException {
+    URL url;
+
+    if (includePreconfigureData) {
+      url = ClassLoader.getSystemResource("PreconfigureActionTest_kerberos_descriptor_stack_preconfigure.json");
+    } else {
+      url = ClassLoader.getSystemResource("PreconfigureActionTest_kerberos_descriptor_stack.json");
+    }
+
+    return new KerberosDescriptorFactory().createInstance(new File(url.toURI()));
+  }
+
+  private ServiceComponent createMockServiceComponent(String name, Boolean isClientComponent, Map<String, ServiceComponentHost> serviceComponentHostMap) throws AmbariException {
+    ServiceComponent serviceComponent = createMock(ServiceComponent.class);
+    expect(serviceComponent.getName()).andReturn(name).anyTimes();
+    expect(serviceComponent.isClientComponent()).andReturn(isClientComponent).anyTimes();
+
+    for (Map.Entry<String, ServiceComponentHost> entry : serviceComponentHostMap.entrySet()) {
+      expect(serviceComponent.getServiceComponentHost(entry.getKey())).andReturn(entry.getValue()).anyTimes();
+    }
+
+    expect(serviceComponent.getServiceComponentHosts()).andReturn(serviceComponentHostMap).anyTimes();
+
+    return serviceComponent;
+  }
+
+  private ServiceComponentHost createMockServiceComponentHost(String serviceName, String componentName, String hostname, Host host) {
+    ServiceComponentHost serviceComponentHost = createMock(ServiceComponentHost.class);
+    expect(serviceComponentHost.getServiceName()).andReturn(serviceName).anyTimes();
+    expect(serviceComponentHost.getServiceComponentName()).andReturn(componentName).anyTimes();
+    expect(serviceComponentHost.getHostName()).andReturn(hostname).anyTimes();
+    expect(serviceComponentHost.getHost()).andReturn(host).anyTimes();
+    expect(serviceComponentHost.getComponentAdminState()).andReturn(HostComponentAdminState.INSERVICE).anyTimes();
+
+    return serviceComponentHost;
+  }
+
+  private Service createMockService(String name, Map<String, ServiceComponent> components, StackId desiredStackId) {
+    Service service = createMock(Service.class);
+    expect(service.getName()).andReturn(name).anyTimes();
+    expect(service.getServiceComponents()).andReturn(components).anyTimes();
+    expect(service.getDesiredStackId()).andReturn(desiredStackId).anyTimes();
+    return service;
+  }
+
+  private Clusters createMockClusters(Injector injector, Cluster cluster) throws AmbariException {
+    Clusters clusters = injector.getInstance(Clusters.class);
+    expect(clusters.getCluster(CLUSTER_NAME)).andReturn(cluster).atLeastOnce();
+    return clusters;
+  }
+
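+  /**
+   * Creates a Cluster mock exposing the supplied hosts, services, service component hosts,
+   * stack and desired configs, plus getServiceByConfigType lookups for the config types
+   * referenced by the Kerberos descriptors.
+   */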
+  private Cluster createMockCluster(SecurityType securityType, Collection<Host> hosts,
+                                    Map<String, Service> services,
+                                    Map<String, List<ServiceComponentHost>> serviceComponentHosts,
+                                    StackId currentStackId, final Map<String, Config> clusterConfigs) {
+    final Cluster cluster = createMock(Cluster.class);
+    expect(cluster.getSecurityType()).andReturn(securityType).anyTimes();
+    expect(cluster.getClusterName()).andReturn(CLUSTER_NAME).anyTimes();
+    expect(cluster.getClusterId()).andReturn(1L).anyTimes();
+    expect(cluster.getHosts()).andReturn(hosts).anyTimes();
+    expect(cluster.getServices()).andReturn(services).anyTimes();
+    expect(cluster.getCurrentStackVersion()).andReturn(currentStackId).anyTimes();
+
+    for (Map.Entry<String, List<ServiceComponentHost>> entry : serviceComponentHosts.entrySet()) {
+      expect(cluster.getServiceComponentHosts(entry.getKey())).andReturn(entry.getValue()).atLeastOnce();
+    }
+
+    expect(cluster.getServiceComponentHostMap(null, new HashSet<>(Arrays.asList("HDFS", "ZOOKEEPER", "YARN", "KNOX"))))
+        .andReturn(null)
+        .anyTimes();
+    expect(cluster.getServiceComponentHostMap(null, new HashSet<>(Arrays.asList("HDFS", "ZOOKEEPER", "YARN"))))
+        .andReturn(null)
+        .anyTimes();
+
+    Map<String, String> configTypeService = new HashMap<>();
+    configTypeService.put("hdfs-site", "HDFS");
+    configTypeService.put("core-site", "HDFS");
+    configTypeService.put("hadoop-env", "HDFS");
+    configTypeService.put("cluster-env", null);
+    configTypeService.put("kerberos-env", "KERBEROS");
+    configTypeService.put("ranger-hdfs-audit", "RANGER");
+    configTypeService.put("zookeeper-env", "ZOOKEEPER");
+    configTypeService.put("gateway-site", "KNOX");
+
+    for (Map.Entry<String, String> entry : configTypeService.entrySet()) {
+      expect(cluster.getServiceByConfigType(entry.getKey())).andReturn(entry.getValue()).anyTimes();
+    }
+
+    for (Map.Entry<String, Config> entry : clusterConfigs.entrySet()) {
+      expect(cluster.getDesiredConfigByType(entry.getKey())).andReturn(entry.getValue()).anyTimes();
+      expect(cluster.getConfigsByType(entry.getKey())).andReturn(Collections.singletonMap(entry.getKey(), entry.getValue())).anyTimes();
+      expect(cluster.getConfigPropertiesTypes(entry.getKey())).andReturn(Collections.<PropertyInfo.PropertyType, Set<String>>emptyMap()).anyTimes();
+    }
+
+    return cluster;
+  }
+
+  private Config createMockConfig(Map<String, String> properties) {
+    Config config = createMock(Config.class);
+    expect(config.getProperties()).andReturn(properties).anyTimes();
+    expect(config.getPropertiesAttributes()).andReturn(Collections.<String, Map<String, String>>emptyMap()).anyTimes();
+    return config;
+  }
+
+  private Map<String, String> getDefaultCommandParams() {
+    Map<String, String> commandParams = new HashMap<>();
+    commandParams.put("clusterName", CLUSTER_NAME);
+    commandParams.put(UPGRADE_DIRECTION_KEY, Direction.UPGRADE.name());
+    return commandParams;
+  }
+
+  private ExecutionCommand createMockExecutionCommand(Map<String, String> commandParams) {
+    ExecutionCommand executionCommand = createMock(ExecutionCommand.class);
+    expect(executionCommand.getCommandParams()).andReturn(commandParams).atLeastOnce();
+    return executionCommand;
+  }
+
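+  /**
+   * Builds a Guice injector that wires in a real KerberosHelperImpl and Configuration while
+   * mocking most of the remaining dependencies.
+   */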
+  private Injector getInjector() {
+    return Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(EntityManager.class).toInstance(createMock(EntityManager.class));
+        bind(DBAccessor.class).toInstance(createMock(DBAccessor.class));
+        bind(UpgradeContextFactory.class).toInstance(createMock(UpgradeContextFactory.class));
+        bind(OsFamily.class).toInstance(createMock(OsFamily.class));
+        bind(StackManagerFactory.class).toInstance(createMock(StackManagerFactory.class));
+        bind(StageFactory.class).toInstance(createMock(StageFactory.class));
+        bind(AmbariMetaInfo.class).toInstance(createMock(AmbariMetaInfo.class));
+        bind(AmbariCustomCommandExecutionHelper.class).toInstance(createMock(AmbariCustomCommandExecutionHelper.class));
+        bind(ActionManager.class).toInstance(createMock(ActionManager.class));
+        bind(HostRoleCommandDAO.class).toInstance(createNiceMock(HostRoleCommandDAO.class));
+        bind(AuditLogger.class).toInstance(createNiceMock(AuditLogger.class));
+        bind(ArtifactDAO.class).toInstance(createNiceMock(ArtifactDAO.class));
+        bind(KerberosPrincipalDAO.class).toInstance(createNiceMock(KerberosPrincipalDAO.class));
+        bind(RoleCommandOrderProvider.class).to(CachedRoleCommandOrderProvider.class);
+        bind(HostRoleCommandFactory.class).to(HostRoleCommandFactoryImpl.class);
+        bind(RoleGraphFactory.class).toInstance(createMock(RoleGraphFactory.class));
+        bind(RequestFactory.class).toInstance(createMock(RequestFactory.class));
+        bind(RequestExecutionFactory.class).toInstance(createMock(RequestExecutionFactory.class));
+        bind(CredentialStoreService.class).toInstance(createMock(CredentialStoreService.class));
+        bind(TopologyManager.class).toInstance(createNiceMock(TopologyManager.class));
+        bind(ConfigFactory.class).toInstance(createMock(ConfigFactory.class));
+        bind(PersistedState.class).toInstance(createMock(PersistedState.class));
+        bind(ConfigureClusterTaskFactory.class).toInstance(createNiceMock(ConfigureClusterTaskFactory.class));
+        bind(Configuration.class).toInstance(new Configuration(new Properties()));
+
+        bind(AmbariManagementController.class).toInstance(createMock(AmbariManagementController.class));
+        bind(KerberosHelper.class).to(KerberosHelperImpl.class);
+        bind(Clusters.class).toInstance(createMock(Clusters.class));
+        bind(StackAdvisorHelper.class).toInstance(createMock(StackAdvisorHelper.class));
+        bind(ConfigHelper.class).toInstance(createMock(ConfigHelper.class));
+      }
+    });
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/test/resources/PreconfigureActionTest_cluster_config.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/PreconfigureActionTest_cluster_config.json b/ambari-server/src/test/resources/PreconfigureActionTest_cluster_config.json
new file mode 100644
index 0000000..2a744c7
--- /dev/null
+++ b/ambari-server/src/test/resources/PreconfigureActionTest_cluster_config.json
@@ -0,0 +1,110 @@
+{
+  "core-site": {
+    "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+    "fs.trash.interval": "360",
+    "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+    "hadoop.custom-extensions.root": "/hdp/ext/{{major_stack_version}}/hadoop",
+    "hadoop.http.authentication.simple.anonymous.allowed": "true",
+    "hadoop.proxyuser.hdfs.groups": "*",
+    "hadoop.proxyuser.hdfs.hosts": "*",
+    "hadoop.proxyuser.root.groups": "*",
+    "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org",
+    "hadoop.security.auth_to_local": "DEFAULT",
+    "hadoop.security.authentication": "simple",
+    "hadoop.security.authorization": "false",
+    "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+    "io.file.buffer.size": "131072",
+    "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+    "ipc.client.connect.max.retries": "50",
+    "ipc.client.connection.maxidletime": "30000",
+    "ipc.client.idlethreshold": "8000",
+    "ipc.server.tcpnodelay": "true",
+    "mapreduce.jobtracker.webinterface.trusted": "false",
+    "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py"
+  },
+  "hadoop-env": {
+    "dtnode_heapsize": "1024m",
+    "hadoop_heapsize": "1024",
+    "hadoop_pid_dir_prefix": "/var/run/hadoop",
+    "hadoop_root_logger": "INFO,RFA",
+    "hdfs_log_dir_prefix": "/var/log/hadoop",
+    "hdfs_principal_name": "hdfs-c1@EXAMPLE.COM",
+    "hdfs_tmp_dir": "/tmp",
+    "hdfs_user": "hdfs",
+    "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+    "hdfs_user_nofile_limit": "128000",
+    "hdfs_user_nproc_limit": "65536",
+    "keyserver_host": " ",
+    "keyserver_port": "",
+    "namenode_backup_dir": "/tmp/upgrades",
+    "namenode_heapsize": "1024m",
+    "namenode_opt_maxnewsize": "128m",
+    "namenode_opt_maxpermsize": "256m",
+    "namenode_opt_newsize": "128m",
+    "namenode_opt_permsize": "128m",
+    "nfsgateway_heapsize": "1024",
+    "proxyuser_group": "users"
+  },
+  "cluster-env" : {
+    "agent_mounts_ignore_list": "",
+    "alerts_repeat_tolerance": "1",
+    "ambari_principal_name": "ambari-server-c1@EXAMPLE.COM",
+    "enable_external_ranger": "false",
+    "fetch_nonlocal_groups": "true",
+    "hide_yarn_memory_widget": "false",
+    "ignore_bad_mounts": "false",
+    "ignore_groupsusers_create": "false",
+    "kerberos_domain": "EXAMPLE.COM",
+    "manage_dirs_on_root": "true",
+    "managed_hdfs_resource_property_names": "",
+    "one_dir_per_partition": "false",
+    "override_uid": "true",
+    "recovery_enabled": "false",
+    "recovery_lifetime_max_count": "1024",
+    "recovery_max_count": "6",
+    "recovery_retry_interval": "5",
+    "recovery_type": "AUTO_START",
+    "recovery_window_in_minutes": "60",
+    "repo_ubuntu_template": "{{package_type}} {{base_url}} {{components}}",
+    "security_enabled": "true",
+    "smokeuser": "ambari-qa",
+    "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
+    "smokeuser_principal_name": "ambari-qa-c1@EXAMPLE.COM",
+    "stack_name": "HDP",
+    "sysprep_skip_copy_fast_jar_hdfs": "false",
+    "sysprep_skip_copy_oozie_share_lib_to_hdfs": "false",
+    "sysprep_skip_copy_tarballs_hdfs": "false",
+    "sysprep_skip_create_users_and_groups": "false",
+    "sysprep_skip_setup_jce": "false",
+    "user_group": "hadoop"
+  },
+  "kerberos-env" : {
+    "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}",
+    "admin_server_host": "c6401",
+    "case_insensitive_username_rules": "false",
+    "container_dn": "",
+    "create_ambari_principal": "true",
+    "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+    "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+    "group": "ambari-managed-principals",
+    "install_packages": "true",
+    "kdc_create_attributes": "",
+    "kdc_hosts": "c6401",
+    "kdc_type": "mit-kdc",
+    "ldap_url": "",
+    "manage_auth_to_local": "true",
+    "manage_identities": "true",
+    "master_kdc": "",
+    "password_chat_timeout": "5",
+    "password_length": "20",
+    "password_min_digits": "1",
+    "password_min_lowercase_letters": "1",
+    "password_min_punctuation": "1",
+    "password_min_uppercase_letters": "1",
+    "password_min_whitespace": "0",
+    "preconfigure_services": "DEFAULT",
+    "realm": "EXAMPLE.COM",
+    "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+    "set_password_expiry": "false"
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/test/resources/PreconfigureActionTest_kerberos_descriptor_stack.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/PreconfigureActionTest_kerberos_descriptor_stack.json b/ambari-server/src/test/resources/PreconfigureActionTest_kerberos_descriptor_stack.json
new file mode 100644
index 0000000..ddd00bd
--- /dev/null
+++ b/ambari-server/src/test/resources/PreconfigureActionTest_kerberos_descriptor_stack.json
@@ -0,0 +1,713 @@
+{
+  "identities": [
+    {
+      "keytab": {
+        "configuration": "cluster-env/smokeuser_keytab",
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "group": {
+          "access": "r",
+          "name": "${cluster-env/user_group}"
+        },
+        "owner": {
+          "access": "r",
+          "name": "${cluster-env/smokeuser}"
+        }
+      },
+      "name": "smokeuser",
+      "principal": {
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username": "${cluster-env/smokeuser}",
+        "type": "user",
+        "value": "${cluster-env/smokeuser}${principal_suffix}@${realm}"
+      }
+    },
+    {
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "group": {
+          "access": "r",
+          "name": "${cluster-env/user_group}"
+        },
+        "owner": {
+          "access": "r",
+          "name": "root"
+        }
+      },
+      "name": "spnego",
+      "principal": {
+        "configuration": null,
+        "local_username": null,
+        "type": "service",
+        "value": "HTTP/_HOST@${realm}"
+      }
+    }
+  ],
+  "services": [
+    {
+      "components": [
+        {
+          "identities": [
+            {
+              "keytab": {
+                "file": "${keytab_dir}/ambari.server.keytab",
+                "group": {},
+                "owner": {
+                  "access": "r"
+                }
+              },
+              "name": "ambari-server",
+              "principal": {
+                "configuration": "cluster-env/ambari_principal_name",
+                "local_username": null,
+                "type": "user",
+                "value": "ambari-server${principal_suffix}@${realm}"
+              }
+            },
+            {
+              "name": "ambari-server_spnego",
+              "reference": "/spnego"
+            }
+          ],
+          "name": "AMBARI_SERVER"
+        }
+      ],
+      "name": "AMBARI"
+    },
+    {
+      "auth_to_local_properties": [
+        "core-site/hadoop.security.auth_to_local"
+      ],
+      "components": [
+        {
+          "configurations": [
+            {
+              "hdfs-site": {
+                "dfs.datanode.address": "0.0.0.0:1019",
+                "dfs.datanode.http.address": "0.0.0.0:1022"
+              }
+            }
+          ],
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "hdfs-site/dfs.datanode.keytab.file",
+                "file": "${keytab_dir}/dn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "datanode_dn",
+              "principal": {
+                "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "dn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "DATANODE"
+        },
+        {
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ],
+          "name": "HDFS_CLIENT"
+        },
+        {
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hdfs-site/dfs.journalnode.keytab.file",
+                "file": "${keytab_dir}/jn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "journalnode_jn",
+              "principal": {
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "jn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "JOURNALNODE"
+        },
+        {
+          "configurations": [
+            {
+              "hdfs-site": {
+                "dfs.block.access.token.enable": "true"
+              }
+            }
+          ],
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.keyTab",
+                "file": "${keytab_dir}/nn.service.keytab"
+              },
+              "name": "/HDFS/NAMENODE/namenode_nn",
+              "principal": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.principal",
+                "local_username": null,
+                "type": null,
+                "value": "nn/_HOST@${realm}"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hadoop-env/hdfs_user_keytab",
+                "file": "${keytab_dir}/hdfs.headless.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "hdfs",
+              "principal": {
+                "configuration": "hadoop-env/hdfs_principal_name",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "user",
+                "value": "${hadoop-env/hdfs_user}${principal_suffix}@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hdfs-site/dfs.namenode.keytab.file",
+                "file": "${keytab_dir}/nn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "namenode_nn",
+              "principal": {
+                "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "nn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "NAMENODE"
+        },
+        {
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "hdfs-site/nfs.keytab.file",
+                "file": "${keytab_dir}/nfs.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "nfsgateway",
+              "principal": {
+                "configuration": "hdfs-site/nfs.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "nfs/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "NFS_GATEWAY"
+        },
+        {
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file",
+                "file": "${keytab_dir}/nn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "secondary_namenode_nn",
+              "principal": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "nn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "SECONDARY_NAMENODE"
+        }
+      ],
+      "configurations": [
+        {
+          "core-site": {
+            "ha.zookeeper.acl": "sasl:nn:rwcda",
+            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}",
+            "hadoop.security.authentication": "kerberos",
+            "hadoop.security.authorization": "true"
+          }
+        },
+        {
+          "ranger-hdfs-audit": {
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true"
+          }
+        }
+      ],
+      "identities": [
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "keytab": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab",
+            "file": "${keytab_dir}/spnego.service.keytab"
+          },
+          "name": "/spnego",
+          "principal": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal",
+            "local_username": null,
+            "type": null,
+            "value": "HTTP/_HOST@${realm}"
+          }
+        }
+      ],
+      "name": "HDFS"
+    },
+    {
+      "components": [
+        {
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab",
+                "file": "${keytab_dir}/spnego.service.keytab"
+              },
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.keytab",
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${yarn-env/yarn_user}"
+                }
+              },
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}",
+                "type": "service",
+                "value": "yarn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "APP_TIMELINE_SERVER"
+        },
+        {
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor"
+              }
+            }
+          ],
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file",
+                "file": null
+              },
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal",
+                "local_username": null,
+                "type": null,
+                "value": null
+              },
+              "when": {
+                "contains": [
+                  "services",
+                  "HIVE"
+                ]
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file",
+                "file": "${keytab_dir}/spnego.service.keytab"
+              },
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file",
+                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
+                "group": {
+                  "access": "r",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${yarn-env/yarn_user}"
+                }
+              },
+              "name": "llap_zk_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal",
+                "local_username": null,
+                "type": "service",
+                "value": "hive/_HOST@${realm}"
+              },
+              "when": {
+                "contains": [
+                  "services",
+                  "HIVE"
+                ]
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.keytab",
+                "file": "${keytab_dir}/nm.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${yarn-env/yarn_user}"
+                }
+              },
+              "name": "nodemanager_nm",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}",
+                "type": "service",
+                "value": "nm/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "NODEMANAGER"
+        },
+        {
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab",
+                "file": "${keytab_dir}/rm.service.keytab"
+              },
+              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
+              "principal": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal",
+                "local_username": null,
+                "type": null,
+                "value": "rm/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file",
+                "file": "${keytab_dir}/spnego.service.keytab"
+              },
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.keytab",
+                "file": "${keytab_dir}/rm.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${yarn-env/yarn_user}"
+                }
+              },
+              "name": "resource_manager_rm",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}",
+                "type": "service",
+                "value": "rm/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "RESOURCEMANAGER"
+        }
+      ],
+      "configurations": [
+        {
+          "capacity-scheduler": {
+            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        },
+        {
+          "ranger-yarn-audit": {
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true"
+          }
+        },
+        {
+          "yarn-site": {
+            "hadoop.registry.client.auth": "kerberos",
+            "hadoop.registry.jaas.context": "Client",
+            "hadoop.registry.secure": "true",
+            "hadoop.registry.system.accounts": "sasl:${principals/YARN/APP_TIMELINE_SERVER/app_timeline_server_yarn|principalPrimary()},sasl:${principals/MAPREDUCE2/HISTORYSERVER/history_server_jhs|principalPrimary()},sasl:${principals/HDFS/NAMENODE/hdfs|principalPrimary()},sasl:${principals/YARN/RESOURCEMANAGER/resource_manager_rm|principalPrimary()},sasl:${principals/HIVE/HIVE_SERVER/hive_server_hive|principalPrimary()}",
+            "yarn.acl.enable": "true",
+            "yarn.admin.acl": "${activity-conf/global.activity.analyzer.user},dr.who,${yarn-env/yarn_user}",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
+            "yarn.resourcemanager.zk-acl": "sasl:${principals/YARN/RESOURCEMANAGER/resource_manager_rm|principalPrimary()}:rwcda",
+            "yarn.timeline-service.enabled": "true",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.type": "kerberos"
+          }
+        }
+      ],
+      "identities": [
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "name": "/spnego"
+        }
+      ],
+      "name": "YARN"
+    },
+    {
+      "components": [
+        {
+          "configurations": [
+            {
+              "core-site": {
+                "hadoop.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "hadoop.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "gateway-site": {
+                "gateway.hadoop.kerberos.secured": "true",
+                "java.security.krb5.conf": "/etc/krb5.conf"
+              }
+            },
+            {
+              "oozie-site": {
+                "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "ranger-knox-audit": {
+                "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
+                "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+                "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+                "xasecure.audit.jaas.Client.option.serviceName": "solr",
+                "xasecure.audit.jaas.Client.option.storeKey": "false",
+                "xasecure.audit.jaas.Client.option.useKeyTab": "true"
+              }
+            },
+            {
+              "webhcat-site": {
+                "webhcat.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "webhcat.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            }
+          ],
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "ranger-knox-audit/xasecure.audit.jaas.Client.option.keyTab",
+                "file": null
+              },
+              "name": "/KNOX/KNOX_GATEWAY/knox_principal",
+              "principal": {
+                "configuration": "ranger-knox-audit/xasecure.audit.jaas.Client.option.principal",
+                "local_username": null,
+                "type": null,
+                "value": null
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "knox-env/knox_keytab_path",
+                "file": "${keytab_dir}/knox.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${knox-env/knox_user}"
+                }
+              },
+              "name": "knox_principal",
+              "principal": {
+                "configuration": "knox-env/knox_principal_name",
+                "local_username": "${knox-env/knox_user}",
+                "type": "service",
+                "value": "${knox-env/knox_user}/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "KNOX_GATEWAY"
+        }
+      ],
+      "name": "KNOX"
+    },
+    {
+      "components": [
+        {
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "zookeeper-env/zookeeper_keytab_path",
+                "file": "${keytab_dir}/zk.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${zookeeper-env/zk_user}"
+                }
+              },
+              "name": "zookeeper_zk",
+              "principal": {
+                "configuration": "zookeeper-env/zookeeper_principal_name",
+                "local_username": null,
+                "type": "service",
+                "value": "zookeeper/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "ZOOKEEPER_SERVER"
+        }
+      ],
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "name": "ZOOKEEPER"
+    }
+  ],
+  "properties": {
+    "additional_realms": "",
+    "keytab_dir": "/etc/security/keytabs",
+    "principal_suffix": "-${cluster_name|toLower()}",
+    "realm": "EXAMPLE.COM"
+  }
+}
\ No newline at end of file