Posted to commits@ambari.apache.org by lp...@apache.org on 2017/07/14 12:53:59 UTC

[01/36] ambari git commit: AMBARI-21370: Support VIPs instead of Host Names - addendum (jluniya) [Forced Update!]

Repository: ambari
Updated Branches:
  refs/heads/feature-branch-AMBARI-21307 b4b586a10 -> 63186bf3e (forced update)


AMBARI-21370: Support VIPs instead of Host Names - addendum (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/42560676
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/42560676
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/42560676

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 425606769e15589a9a51ab2fc89e56bbf158d638
Parents: 88cba7f
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Thu Jul 6 12:33:47 2017 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Thu Jul 6 12:33:47 2017 -0700

----------------------------------------------------------------------
 .../server/controller/internal/ClientConfigResourceProvider.java   | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/42560676/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index bd445eb..15c2d81 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -225,6 +225,7 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
         String serviceName = response.getServiceName();
         String componentName = response.getComponentName();
         String hostName = response.getHostname();
+        String publicHostName = response.getPublicHostname();
         ComponentInfo componentInfo = null;
         String packageFolder = null;
 
@@ -441,6 +442,7 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
         jsonContent.put("clusterHostInfo", clusterHostInfo);
         jsonContent.put("hostLevelParams", hostLevelParams);
         jsonContent.put("hostname", hostName);
+        jsonContent.put("public_hostname", publicHostName);
         jsonContent.put("clusterName", cluster.getClusterName());
         jsonConfigurations = gson.toJson(jsonContent);
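
For illustration, a minimal sketch of the kind of jsonConfigurations payload the two puts above produce; everything except the hostname, public_hostname, and clusterName keys is omitted, and the host names are placeholders.

import json

# Hypothetical excerpt of the jsonConfigurations payload assembled above.
json_configurations = json.dumps({
    "hostname": "internal-host-1.example.com",
    "public_hostname": "vip.example.com",
    "clusterName": "c1"
})

payload = json.loads(json_configurations)
# Consumers that keyed off "hostname" can now prefer the public (VIP) name when present.
print(payload.get("public_hostname", payload["hostname"]))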
 


[17/36] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by lp...@apache.org.
AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f33a250c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f33a250c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f33a250c

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: f33a250c0e7624b6cbc0a11ffce12506eaa95d9a
Parents: a795f38
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Jul 7 14:36:05 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Jul 7 23:00:23 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_features.py       |   13 +
 .../libraries/functions/stack_tools.py          |   39 +
 .../libraries/script/script.py                  |   19 +-
 .../server/api/query/JpaPredicateVisitor.java   |    8 +-
 .../controller/ActionExecutionContext.java      |   26 +
 .../controller/AmbariActionExecutionHelper.java |   26 +-
 .../BlueprintConfigurationProcessor.java        |   59 +-
 .../ClusterStackVersionResourceProvider.java    |  163 ++-
 .../ambari/server/state/ConfigHelper.java       |   32 +
 .../ambari/server/topology/AmbariContext.java   |   18 +
 .../server/upgrade/UpgradeCatalog252.java       |   61 +
 .../package/alerts/alert_hive_metastore.py      |   11 +-
 .../package/alerts/alert_llap_app_status.py     |   12 +-
 .../package/alerts/alert_check_oozie_server.py  |    8 +-
 .../resources/host_scripts/alert_disk_space.py  |   10 +-
 .../host_scripts/alert_version_select.py        |   16 +-
 .../HDP/2.0.6/configuration/cluster-env.xml     |   16 +-
 .../HDP/2.0.6/properties/stack_features.json    |  852 +++++------
 .../HDP/2.0.6/properties/stack_tools.json       |   16 +-
 .../PERF/1.0/configuration/cluster-env.xml      |   16 +-
 .../PERF/1.0/properties/stack_features.json     |   38 +-
 .../stacks/PERF/1.0/properties/stack_tools.json |   16 +-
 .../BlueprintConfigurationProcessorTest.java    |   41 +-
 ...ClusterStackVersionResourceProviderTest.java |    4 +-
 .../ClusterConfigurationRequestTest.java        |   60 +-
 .../common-services/configs/hawq_default.json   |    6 +-
 .../python/host_scripts/TestAlertDiskSpace.py   |   16 +-
 .../2.5/configs/ranger-admin-default.json       |  990 ++++++-------
 .../2.5/configs/ranger-admin-secured.json       | 1108 +++++++--------
 .../stacks/2.5/configs/ranger-kms-default.json  | 1158 +++++++--------
 .../stacks/2.5/configs/ranger-kms-secured.json  | 1320 +++++++++---------
 .../2.6/configs/ranger-admin-default.json       |  953 +++++++------
 .../2.6/configs/ranger-admin-secured.json       | 1066 +++++++-------
 .../src/test/python/stacks/utils/RMFTestCase.py |    8 +-
 34 files changed, 4353 insertions(+), 3852 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index cbd32e7..576c138 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -43,6 +43,12 @@ def check_stack_feature(stack_feature, stack_version):
 
   from resource_management.libraries.functions.default import default
   from resource_management.libraries.functions.version import compare_versions
+
+  stack_name = default("/hostLevelParams/stack_name", None)
+  if stack_name is None:
+    Logger.warning("Cannot find the stack name in the command. Stack features cannot be loaded")
+    return False
+
   stack_features_config = default("/configurations/cluster-env/stack_features", None)
 
   if not stack_version:
@@ -51,6 +57,13 @@ def check_stack_feature(stack_feature, stack_version):
 
   if stack_features_config:
     data = json.loads(stack_features_config)
+
+    if stack_name not in data:
+      Logger.warning("Cannot find stack features for the stack named {0}".format(stack_name))
+      return False
+
+    data = data[stack_name]
+
     for feature in data["stack_features"]:
       if feature["name"] == stack_feature:
         if "min_version" in feature:
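
For illustration, a standalone sketch of the lookup order check_stack_feature now follows: stack name first, then the feature list. The stack_features content and the "rolling_upgrade" entry below are placeholders, not the shipped HDP definition.

import json

# Hypothetical stack_features value from cluster-env, now keyed by stack name.
stack_features_config = json.dumps({
    "HDP": {
        "stack_features": [
            {"name": "rolling_upgrade", "min_version": "2.2.0.0"}
        ]
    }
})

def has_feature(stack_name, feature_name, features_json):
    # Mirror the lookup order above: stack name first, then the feature list.
    data = json.loads(features_json)
    if stack_name not in data:
        return False
    for feature in data[stack_name]["stack_features"]:
        if feature["name"] == feature_name:
            return True
    return False

print(has_feature("HDP", "rolling_upgrade", stack_features_config))  # True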

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
index 02ae62d..420ae11 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
@@ -39,15 +39,33 @@ def get_stack_tool(name):
   :return: tool_name, tool_path, tool_package
   """
   from resource_management.libraries.functions.default import default
+
+  stack_name = default("/hostLevelParams/stack_name", None)
+  if stack_name is None:
+    Logger.warning("Cannot find the stack name in the command. Stack tools cannot be loaded")
+    return (None, None, None)
+
   stack_tools = None
   stack_tools_config = default("/configurations/cluster-env/stack_tools", None)
   if stack_tools_config:
     stack_tools = json.loads(stack_tools_config)
 
+  if stack_tools is None:
+    Logger.warning("The stack tools could not be found in cluster-env")
+    return (None, None, None)
+
+  if stack_name not in stack_tools:
+    Logger.warning("Cannot find stack tools for the stack named {0}".format(stack_name))
+    return (None, None, None)
+
+  # load the stack tools keyed by the stack name
+  stack_tools = stack_tools[stack_name]
+
   if not stack_tools or not name or name.lower() not in stack_tools:
     Logger.warning("Cannot find config for {0} stack tool in {1}".format(str(name), str(stack_tools)))
     return (None, None, None)
 
+
   tool_config = stack_tools[name.lower()]
 
   # Return fixed length (tool_name, tool_path tool_package) tuple
@@ -81,3 +99,24 @@ def get_stack_tool_package(name):
   """
   (tool_name, tool_path, tool_package) = get_stack_tool(name)
   return tool_package
+
+
+def get_stack_root(stack_name, stack_root_json):
+  """
+  Get the stack-specific install root directory from the raw, JSON-escaped properties.
+  :param stack_name:
+  :param stack_root_json:
+  :return: stack_root
+  """
+  from resource_management.libraries.functions.default import default
+
+  if stack_root_json is None:
+    return "/usr/{0}".format(stack_name.lower())
+
+  stack_root = json.loads(stack_root_json)
+
+  if stack_name not in stack_root:
+    Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
+    return "/usr/{0}".format(stack_name.lower())
+
+  return stack_root[stack_name]
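
As a hedged, standalone sketch of the new per-stack layout get_stack_tool expects (the hdp-select/conf-select entries are illustrative, and lookup_tool is a stand-in rather than the library function):

import json

# Hypothetical stack_tools value, keyed by stack name as get_stack_tool now expects.
stack_tools_json = json.dumps({
    "HDP": {
        "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
        "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
    }
})

def lookup_tool(stack_name, tool_name, tools_json):
    # Descend through the stack name first, then the lower-cased tool key.
    tools = json.loads(tools_json)
    if stack_name not in tools:
        return (None, None, None)
    tool = tools[stack_name].get(tool_name.lower())
    if tool is None:
        return (None, None, None)
    # Pad to a fixed-length (tool_name, tool_path, tool_package) tuple.
    return tuple(tool + [None] * (3 - len(tool)))

print(lookup_tool("HDP", "STACK_SELECTOR", stack_tools_json))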

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 2c56a13..2b374c5 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -597,7 +597,11 @@ class Script(object):
     :return: a stack name or None
     """
     from resource_management.libraries.functions.default import default
-    return default("/hostLevelParams/stack_name", "HDP")
+    stack_name = default("/hostLevelParams/stack_name", None)
+    if stack_name is None:
+      stack_name = default("/configurations/cluster-env/stack_name", "HDP")
+
+    return stack_name
 
   @staticmethod
   def get_stack_root():
@@ -607,7 +611,18 @@ class Script(object):
     """
     from resource_management.libraries.functions.default import default
     stack_name = Script.get_stack_name()
-    return default("/configurations/cluster-env/stack_root", "/usr/{0}".format(stack_name.lower()))
+    stack_root_json = default("/configurations/cluster-env/stack_root", None)
+
+    if stack_root_json is None:
+      return "/usr/{0}".format(stack_name.lower())
+
+    stack_root = json.loads(stack_root_json)
+
+    if stack_name not in stack_root:
+      Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
+      return "/usr/{0}".format(stack_name.lower())
+
+    return stack_root[stack_name]
 
   @staticmethod
   def get_stack_version():
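
A small sketch of the fallback order Script.get_stack_name now uses, written against a plain command dictionary; resolve_stack_name is a stand-in for illustration only.

def resolve_stack_name(command_json):
    # Mirror the new lookup order: hostLevelParams first, then cluster-env, then "HDP".
    name = command_json.get("hostLevelParams", {}).get("stack_name")
    if name is None:
        name = command_json.get("configurations", {}).get("cluster-env", {}).get("stack_name", "HDP")
    return name

print(resolve_stack_name({"hostLevelParams": {"stack_name": "HDP"}}))  # HDP
print(resolve_stack_name({}))                                          # HDP (default)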

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
index 984dc3b..84e9dd9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
@@ -63,11 +63,6 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
   final private CriteriaQuery<T> m_query;
 
   /**
-   * The entity class that the root of the query is built from.
-   */
-  final private Class<T> m_entityClass;
-
-  /**
    * The last calculated predicate.
    */
   private javax.persistence.criteria.Predicate m_lastPredicate = null;
@@ -92,7 +87,6 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
   public JpaPredicateVisitor(EntityManager entityManager, Class<T> entityClass) {
     m_entityManager = entityManager;
     m_builder = m_entityManager.getCriteriaBuilder();
-    m_entityClass = entityClass;
     m_query = m_builder.createQuery(entityClass);
     m_root = m_query.from(entityClass);
   }
@@ -178,7 +172,7 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
     }
 
     String operator = predicate.getOperator();
-    Comparable<?> value = predicate.getValue();
+    Comparable value = predicate.getValue();
 
     // convert string to enum for proper JPA comparisons
     if (lastSingularAttribute != null) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index 42a95c0..34d6db9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@ -27,6 +27,7 @@ import org.apache.ambari.server.actionmanager.TargetHostType;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.controller.internal.RequestOperationLevel;
 import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+import org.apache.ambari.server.state.StackId;
 
 /**
  * The context required to create tasks and stages for a custom action
@@ -43,6 +44,7 @@ public class ActionExecutionContext {
   private String expectedComponentName;
   private boolean hostsInMaintenanceModeExcluded = true;
   private boolean allowRetry = false;
+  private StackId stackId;
 
   private List<ExecutionCommandVisitor> m_visitors = new ArrayList<>();
 
@@ -173,6 +175,30 @@ public class ActionExecutionContext {
   }
 
   /**
+   * Gets the stack to use for generating stack-associated values for a command.
+   * In some cases the cluster's stack is not the correct one to use, such as
+   * when distributing a repository.
+   *
+   * @return the stackId the stack to use when generating stack-specific content
+   *         for the command.
+   */
+  public StackId getStackId() {
+    return stackId;
+  }
+
+  /**
+   * Sets the stack to use for generating stack-associated values for a command.
+   * In some cases the cluster's stack is not the correct one to use, such as
+   * when distributing a repository.
+   *
+   * @param stackId
+   *          the stackId to use for stack-based properties on the command.
+   */
+  public void setStackId(StackId stackId) {
+    this.stackId = stackId;
+  }
+
+  /**
    * Adds a command visitor that will be invoked after a command is created.  Provides access
    * to the command.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 8f522b0..391daa9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -465,7 +465,10 @@ public class AmbariActionExecutionHelper {
 
       if (StringUtils.isNotBlank(serviceName)) {
         Service service = cluster.getService(serviceName);
-        addRepoInfoToHostLevelParams(service.getDesiredRepositoryVersion(), hostLevelParams, hostName);
+        addRepoInfoToHostLevelParams(actionContext, service.getDesiredRepositoryVersion(),
+            hostLevelParams, hostName);
+      } else {
+        addRepoInfoToHostLevelParams(actionContext, null, hostLevelParams, hostName);
       }
 
 
@@ -529,9 +532,19 @@ public class AmbariActionExecutionHelper {
   *
   * */
 
-  private void addRepoInfoToHostLevelParams(RepositoryVersionEntity repositoryVersion,
-      Map<String, String> hostLevelParams, String hostName) throws AmbariException {
+  private void addRepoInfoToHostLevelParams(ActionExecutionContext actionContext,
+      RepositoryVersionEntity repositoryVersion, Map<String, String> hostLevelParams,
+      String hostName) throws AmbariException {
+
+    // if the repo is null, see if any values from the context should go on the
+    // host params and then return
     if (null == repositoryVersion) {
+      if (null != actionContext.getStackId()) {
+        StackId stackId = actionContext.getStackId();
+        hostLevelParams.put(STACK_NAME, stackId.getStackName());
+        hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
+      }
+
       return;
     }
 
@@ -557,7 +570,10 @@ public class AmbariActionExecutionHelper {
 
     hostLevelParams.put(REPO_INFO, rootJsonObject.toString());
 
-    hostLevelParams.put(STACK_NAME, repositoryVersion.getStackName());
-    hostLevelParams.put(STACK_VERSION, repositoryVersion.getStackVersion());
+    // set the host level params if not already set by whoever is creating this command
+    if (!hostLevelParams.containsKey(STACK_NAME) || !hostLevelParams.containsKey(STACK_VERSION)) {
+      hostLevelParams.put(STACK_NAME, repositoryVersion.getStackName());
+      hostLevelParams.put(STACK_VERSION, repositoryVersion.getStackVersion());
+    }
   }
 }
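
The precedence above can be pictured with this Python stand-in (the Java method remains the source of truth; the key names stand in for the STACK_NAME/STACK_VERSION constants): the context's stack fills the params when no repository version exists, and already-populated params are never overwritten.

def add_stack_params(host_level_params, repo_stack=None, context_stack=None):
    # repo_stack / context_stack are (name, version) tuples standing in for
    # RepositoryVersionEntity and ActionExecutionContext.getStackId().
    if repo_stack is None:
        if context_stack is not None:
            host_level_params["stack_name"], host_level_params["stack_version"] = context_stack
        return host_level_params
    # Only use the repository's stack when nothing has populated the params already.
    if "stack_name" not in host_level_params or "stack_version" not in host_level_params:
        host_level_params["stack_name"], host_level_params["stack_version"] = repo_stack
    return host_level_params

print(add_stack_params({}, context_stack=("HDP", "2.6")))
print(add_stack_params({"stack_name": "HDP", "stack_version": "2.5"}, repo_stack=("HDP", "2.6")))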

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index e93b2f7..37284be 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -36,7 +36,9 @@ import java.util.regex.Pattern;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.apache.ambari.server.topology.AdvisedConfiguration;
 import org.apache.ambari.server.topology.Blueprint;
@@ -356,7 +358,7 @@ public class BlueprintConfigurationProcessor {
             final String originalValue = typeMap.get(propertyName);
             final String updatedValue =
               updater.updateForClusterCreate(propertyName, originalValue, clusterProps, clusterTopology);
-            
+
             if(updatedValue == null ) {
               continue;
             }
@@ -419,6 +421,7 @@ public class BlueprintConfigurationProcessor {
     }
 
     // Explicitly set any properties that are required but not currently provided in the stack definition.
+    setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
     setRetryConfiguration(clusterConfig, configTypesUpdated);
     setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
     addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
@@ -531,7 +534,7 @@ public class BlueprintConfigurationProcessor {
     try {
       String clusterName = clusterTopology.getAmbariContext().getClusterName(clusterTopology.getClusterId());
       Cluster cluster = clusterTopology.getAmbariContext().getController().getClusters().getCluster(clusterName);
-      authToLocalPerClusterMap = new HashMap<Long, Set<String>>();
+      authToLocalPerClusterMap = new HashMap<>();
       authToLocalPerClusterMap.put(Long.valueOf(clusterTopology.getClusterId()), clusterTopology.getAmbariContext().getController().getKerberosHelper().getKerberosDescriptor(cluster).getAllAuthToLocalProperties());
       } catch (AmbariException e) {
         LOG.error("Error while getting authToLocal properties. ", e);
@@ -2186,8 +2189,9 @@ public class BlueprintConfigurationProcessor {
       StringBuilder sb = new StringBuilder();
 
       Matcher m = REGEX_IN_BRACKETS.matcher(origValue);
-      if (m.matches())
+      if (m.matches()) {
         origValue = m.group("INNER");
+      }
 
       if (origValue != null) {
         sb.append("[");
@@ -2195,8 +2199,9 @@ public class BlueprintConfigurationProcessor {
         for (String value : origValue.split(",")) {
 
           m = REGEX_IN_QUOTES.matcher(value);
-          if (m.matches())
+          if (m.matches()) {
             value = m.group("INNER");
+          }
 
           if (!isFirst) {
             sb.append(",");
@@ -2230,6 +2235,7 @@ public class BlueprintConfigurationProcessor {
    */
   private static class OriginalValuePropertyUpdater implements PropertyUpdater {
 
+    @Override
     public String updateForClusterCreate(String propertyName,
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
@@ -2950,6 +2956,49 @@ public class BlueprintConfigurationProcessor {
 
 
   /**
+   * Sets the read-only properties for stack features & tools, overriding
+   * anything provided in the blueprint.
+   *
+   * @param configuration
+   *          the configuration to update with values from the stack.
+   * @param configTypesUpdated
+   *          the list of configuration types updated (cluster-env will be added
+   *          to this).
+   * @throws ConfigurationTopologyException
+   */
+  private void setStackToolsAndFeatures(Configuration configuration, Set<String> configTypesUpdated)
+      throws ConfigurationTopologyException {
+    ConfigHelper configHelper = clusterTopology.getAmbariContext().getConfigHelper();
+    Stack stack = clusterTopology.getBlueprint().getStack();
+    String stackName = stack.getName();
+    String stackVersion = stack.getVersion();
+
+    StackId stackId = new StackId(stackName, stackVersion);
+
+    Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_NAME_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY, ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
+
+    try {
+      Map<String, Map<String, String>> defaultStackProperties = configHelper.getDefaultStackProperties(stackId);
+      Map<String,String> clusterEnvDefaultProperties = defaultStackProperties.get(CLUSTER_ENV_CONFIG_TYPE_NAME);
+
+      for( String property : properties ){
+        if (defaultStackProperties.containsKey(property)) {
+          configuration.setProperty(CLUSTER_ENV_CONFIG_TYPE_NAME, property,
+              clusterEnvDefaultProperties.get(property));
+
+          // make sure to include the configuration type as being updated
+          configTypesUpdated.add(CLUSTER_ENV_CONFIG_TYPE_NAME);
+        }
+      }
+    } catch( AmbariException ambariException ){
+      throw new ConfigurationTopologyException("Unable to retrieve the stack tools and features",
+          ambariException);
+    }
+  }
+
+  /**
    * Ensure that the specified property exists.
    * If not, set a default value.
    *
@@ -3099,7 +3148,7 @@ public class BlueprintConfigurationProcessor {
 
     @Override
     public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
-      return !(this.propertyConfigType.equals(configType) &&
+      return !(propertyConfigType.equals(configType) &&
              this.propertyName.equals(propertyName));
     }
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 93c02be..c4fce8a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -67,11 +67,13 @@ import org.apache.ambari.server.security.authorization.RoleAuthorization;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.RepositoryVersionState;
-import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceOsSpecific;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
@@ -83,6 +85,7 @@ import org.apache.commons.lang.math.NumberUtils;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import com.google.gson.Gson;
 import com.google.inject.Inject;
 import com.google.inject.Provider;
 import com.google.inject.persist.Transactional;
@@ -171,12 +174,20 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   @Inject
   private static RepositoryVersionHelper repoVersionHelper;
 
-
+  @Inject
+  private static Gson gson;
 
   @Inject
   private static Provider<Clusters> clusters;
 
   /**
+   * Used for updating the existing stack tools with those of the stack being
+   * distributed.
+   */
+  @Inject
+  private static Provider<ConfigHelper> configHelperProvider;
+
+  /**
    * Constructor.
    */
   public ClusterStackVersionResourceProvider(
@@ -287,8 +298,6 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
     String clName;
     final String desiredRepoVersion;
-    String stackName;
-    String stackVersion;
 
     Map<String, Object> propertyMap = iterator.next();
 
@@ -327,30 +336,30 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
           cluster.getClusterName(), entity.getDirection().getText(false)));
     }
 
-    Set<StackId> stackIds = new HashSet<>();
-    if (propertyMap.containsKey(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID) &&
-            propertyMap.containsKey(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID)) {
-      stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
-      stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
-      StackId stackId = new StackId(stackName, stackVersion);
-      if (! ami.isSupportedStack(stackName, stackVersion)) {
-        throw new NoSuchParentResourceException(String.format("Stack %s is not supported",
-                stackId));
-      }
-      stackIds.add(stackId);
-    } else { // Using stack that is current for cluster
-      for (Service service : cluster.getServices().values()) {
-        stackIds.add(service.getDesiredStackId());
-      }
+    String stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
+    String stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+    if (StringUtils.isBlank(stackName) || StringUtils.isBlank(stackVersion)) {
+      String message = String.format(
+          "Both the %s and %s properties are required when distributing a new stack",
+          CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+
+      throw new SystemException(message);
     }
 
-    if (stackIds.size() > 1) {
-      throw new SystemException("Could not determine stack to add out of " + StringUtils.join(stackIds, ','));
+    StackId stackId = new StackId(stackName, stackVersion);
+
+    if (!ami.isSupportedStack(stackName, stackVersion)) {
+      throw new NoSuchParentResourceException(String.format("Stack %s is not supported", stackId));
     }
 
-    StackId stackId = stackIds.iterator().next();
-    stackName = stackId.getStackName();
-    stackVersion = stackId.getStackVersion();
+    // bootstrap the stack tools if necessary for the stack which is being
+    // distributed
+    try {
+      bootstrapStackTools(stackId, cluster);
+    } catch (AmbariException ambariException) {
+      throw new SystemException("Unable to modify stack tools for new stack being distributed",
+          ambariException);
+    }
 
     RepositoryVersionEntity repoVersionEntity = repositoryVersionDAO.findByStackAndVersion(
         stackId, desiredRepoVersion);
@@ -580,6 +589,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     }
 
     // determine packages for all services that are installed on host
+    List<ServiceOsSpecific.Package> packages = new ArrayList<>();
     Set<String> servicesOnHost = new HashSet<>();
     List<ServiceComponentHost> components = cluster.getServiceComponentHosts(host.getHostName());
     for (ServiceComponentHost component : components) {
@@ -600,16 +610,15 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     RequestResourceFilter filter = new RequestResourceFilter(null, null,
             Collections.singletonList(host.getHostName()));
 
-    ActionExecutionContext actionContext = new ActionExecutionContext(
-            cluster.getClusterName(), INSTALL_PACKAGES_ACTION,
-            Collections.singletonList(filter),
-            roleParams);
+    ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
+        INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), roleParams);
+
+    actionContext.setStackId(stackId);
     actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
 
     repoVersionHelper.addCommandRepository(actionContext, osFamily, repoVersion, repoInfo);
 
     return actionContext;
-
   }
 
 
@@ -698,4 +707,100 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
 
+  /**
+   * Ensures that the stack tools and stack features are set on
+   * {@link ConfigHelper#CLUSTER_ENV} for the stack of the repository being
+   * distributed. This step ensures that the new repository can be distributed
+   * with the correct tools.
+   * <p/>
+   * If the cluster's current stack name matches that of the new stack or the
+   * new stack's tools are already added in the configuration, then this method
+   * will not change anything.
+   *
+   * @param stackId
+   *          the stack of the repository being distributed (not {@code null}).
+   * @param cluster
+   *          the cluster the new stack/repo is being distributed for (not
+   *          {@code null}).
+   * @throws AmbariException
+   */
+  private void bootstrapStackTools(StackId stackId, Cluster cluster) throws AmbariException {
+    // if the stack name is the same as the cluster's current stack name, then
+    // there's no work to do
+    if (StringUtils.equals(stackId.getStackName(),
+        cluster.getCurrentStackVersion().getStackName())) {
+      return;
+    }
+
+    ConfigHelper configHelper = configHelperProvider.get();
+
+    // get the stack tools/features for the stack being distributed
+    Map<String, Map<String, String>> defaultStackConfigurationsByType = configHelper.getDefaultStackProperties(stackId);
+
+    Map<String, String> clusterEnvDefaults = defaultStackConfigurationsByType.get(
+        ConfigHelper.CLUSTER_ENV);
+
+    Config clusterEnv = cluster.getDesiredConfigByType(ConfigHelper.CLUSTER_ENV);
+    Map<String, String> clusterEnvProperties = clusterEnv.getProperties();
+
+    // the 3 properties we need to check and update
+    Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
+
+    // any updates are stored here and merged into the existing config type
+    Map<String, String> updatedProperties = new HashMap<>();
+
+    for (String property : properties) {
+      // determine if the property exists in the stack being distributed (it
+      // kind of has to, but we'll be safe if it's not found)
+      String newStackDefaultJson = clusterEnvDefaults.get(property);
+      if (StringUtils.isBlank(newStackDefaultJson)) {
+        continue;
+      }
+
+      String existingPropertyJson = clusterEnvProperties.get(property);
+
+      // if the stack tools/features property doesn't exist, then just set the
+      // one from the new stack
+      if (StringUtils.isBlank(existingPropertyJson)) {
+        updatedProperties.put(property, newStackDefaultJson);
+        continue;
+      }
+
+      // now is the hard part - we need to check to see if the new stack tools
+      // exists alongside the current tools and if it doesn't, then add the new
+      // tools in
+      final Map<String, Object> existingJson;
+      final Map<String, ?> newStackJsonAsObject;
+      if (StringUtils.equals(property, ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY)) {
+        existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson, Map.class);
+        newStackJsonAsObject = gson.<Map<String, String>> fromJson(newStackDefaultJson, Map.class);
+      } else {
+        existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson,
+            Map.class);
+
+        newStackJsonAsObject = gson.<Map<String, Map<Object, Object>>> fromJson(newStackDefaultJson,
+            Map.class);
+      }
+
+      if (existingJson.keySet().contains(stackId.getStackName())) {
+        continue;
+      }
+
+      existingJson.put(stackId.getStackName(), newStackJsonAsObject.get(stackId.getStackName()));
+
+      String newJson = gson.toJson(existingJson);
+      updatedProperties.put(property, newJson);
+    }
+
+    if (!updatedProperties.isEmpty()) {
+      AmbariManagementController amc = getManagementController();
+      String serviceNote = String.format(
+          "Adding stack tools for %s while distributing a new repository", stackId.toString());
+
+      configHelper.updateConfigType(cluster, stackId, amc, clusterEnv.getType(), updatedProperties,
+          null, amc.getAuthName(), serviceNote);
+    }
+  }
 }
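
The merge performed by bootstrapStackTools can be sketched in Python (illustration only; merge_stack_entry and the MYSTACK values are hypothetical): the new stack's entry is added to the existing per-stack JSON only when its stack name is not already a key.

import json

def merge_stack_entry(existing_json, new_stack_name, new_stack_defaults_json):
    # Add the new stack's entry to an existing per-stack JSON property only if it is absent.
    existing = json.loads(existing_json)
    if new_stack_name in existing:
        return existing_json
    new_defaults = json.loads(new_stack_defaults_json)
    existing[new_stack_name] = new_defaults.get(new_stack_name)
    return json.dumps(existing)

current_stack_root = '{"HDP": "/usr/hdp"}'
new_stack_defaults = '{"MYSTACK": "/usr/mystack"}'  # hypothetical stack being distributed
print(merge_stack_entry(current_stack_root, "MYSTACK", new_stack_defaults))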

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 9f75bf9..a3a676d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -88,8 +88,10 @@ public class ConfigHelper {
   public static final String CLUSTER_ENV_RETRY_COMMANDS = "commands_to_retry";
   public static final String CLUSTER_ENV_RETRY_MAX_TIME_IN_SEC = "command_retry_max_time_in_sec";
   public static final String COMMAND_RETRY_MAX_TIME_IN_SEC_DEFAULT = "600";
+  public static final String CLUSTER_ENV_STACK_NAME_PROPERTY = "stack_name";
   public static final String CLUSTER_ENV_STACK_FEATURES_PROPERTY = "stack_features";
   public static final String CLUSTER_ENV_STACK_TOOLS_PROPERTY = "stack_tools";
+  public static final String CLUSTER_ENV_STACK_ROOT_PROPERTY = "stack_root";
 
   public static final String HTTP_ONLY = "HTTP_ONLY";
   public static final String HTTPS_ONLY = "HTTPS_ONLY";
@@ -1148,6 +1150,36 @@ public class ConfigHelper {
    *
    * @param stack
    *          the stack to pull stack-values from (not {@code null})
+   * @return a mapping of configuration type to map of key/value pairs for the
+   *         default configurations.
+   * @throws AmbariException
+   */
+  public Map<String, Map<String, String>> getDefaultStackProperties(StackId stack)
+      throws AmbariException {
+    Map<String, Map<String, String>> defaultPropertiesByType = new HashMap<>();
+
+    // populate the stack (non-service related) properties
+    Set<org.apache.ambari.server.state.PropertyInfo> stackConfigurationProperties = ambariMetaInfo.getStackProperties(
+        stack.getStackName(), stack.getStackVersion());
+
+    for (PropertyInfo stackDefaultProperty : stackConfigurationProperties) {
+      String type = ConfigHelper.fileNameToConfigType(stackDefaultProperty.getFilename());
+
+      if (!defaultPropertiesByType.containsKey(type)) {
+        defaultPropertiesByType.put(type, new HashMap<String, String>());
+      }
+
+      defaultPropertiesByType.get(type).put(stackDefaultProperty.getName(),
+          stackDefaultProperty.getValue());
+    }
+
+    return defaultPropertiesByType;
+  }
+
+  /**
+   *
+   * @param stack
+   *          the stack to pull stack-values from (not {@code null})
    * @param serviceName
    *          the service name {@code null}).
    * @return a mapping of configuration type to map of key/value pairs for the

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 0467b9b..9b64edc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -69,6 +69,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityType;
@@ -80,6 +81,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
+import com.google.inject.Provider;
 
 
 /**
@@ -100,6 +102,12 @@ public class AmbariContext {
   @Inject
   ConfigFactory configFactory;
 
+  /**
+   * Used for getting configuration property values from stack and services.
+   */
+  @Inject
+  private Provider<ConfigHelper> configHelper;
+
   private static AmbariManagementController controller;
   private static ClusterController clusterController;
   //todo: task id's.  Use existing mechanism for getting next task id sequence
@@ -674,6 +682,16 @@ public class AmbariContext {
     return String.format("%s:%s", bpName, hostGroupName);
   }
 
+  /**
+   * Gets an instance of {@link ConfigHelper} for classes which are not
+   * dependency injected.
+   *
+   * @return a {@link ConfigHelper} instance.
+   */
+  public ConfigHelper getConfigHelper() {
+    return configHelper.get();
+  }
+
   private synchronized HostResourceProvider getHostResourceProvider() {
     if (hostResourceProvider == null) {
       hostResourceProvider = (HostResourceProvider)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index 74f8f35..fa3aea3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -18,10 +18,20 @@
 package org.apache.ambari.server.upgrade;
 
 import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.hadoop.metrics2.sink.relocated.commons.lang.StringUtils;
 
+import com.google.common.collect.Sets;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 
@@ -33,6 +43,8 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
   static final String CLUSTERCONFIG_TABLE = "clusterconfig";
   static final String SERVICE_DELETED_COLUMN = "service_deleted";
 
+  private static final String CLUSTER_ENV = "cluster-env";
+
   /**
    * Constructor.
    *
@@ -79,6 +91,7 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
    */
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
+    resetStackToolsAndFeatures();
   }
 
   /**
@@ -91,4 +104,52 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
     dbAccessor.addColumn(CLUSTERCONFIG_TABLE,
         new DBColumnInfo(SERVICE_DELETED_COLUMN, Short.class, null, 0, false));
   }
+
+  /**
+   * Resets the following properties in {@code cluster-env} to their new
+   * defaults:
+   * <ul>
+   * <li>stack_root
+   * <li>stack_tools
+   * <li>stack_features
+   * </ul>
+   *
+   * @throws AmbariException
+   */
+  private void resetStackToolsAndFeatures() throws AmbariException {
+    Set<String> propertiesToReset = Sets.newHashSet("stack_tools", "stack_features", "stack_root");
+
+    Clusters clusters = injector.getInstance(Clusters.class);
+    ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
+
+    Map<String, Cluster> clusterMap = clusters.getClusters();
+    for (Cluster cluster : clusterMap.values()) {
+      Config clusterEnv = cluster.getDesiredConfigByType(CLUSTER_ENV);
+      if (null == clusterEnv) {
+        continue;
+      }
+
+      Map<String, String> newStackProperties = new HashMap<>();
+      Set<PropertyInfo> stackProperties = configHelper.getStackProperties(cluster);
+      if (null == stackProperties) {
+        continue;
+      }
+
+      for (PropertyInfo propertyInfo : stackProperties) {
+        String fileName = propertyInfo.getFilename();
+        if (StringUtils.isEmpty(fileName)) {
+          continue;
+        }
+
+        if (StringUtils.equals(ConfigHelper.fileNameToConfigType(fileName), CLUSTER_ENV)) {
+          String stackPropertyName = propertyInfo.getName();
+          if (propertiesToReset.contains(stackPropertyName)) {
+            newStackProperties.put(stackPropertyName, propertyInfo.getValue());
+          }
+        }
+      }
+
+      updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, newStackProperties, true, false);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
index 32df7d3..5b4fd68 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
@@ -27,6 +27,7 @@ import logging
 from resource_management.core import global_lock
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import stack_tools
 from resource_management.core.resources import Execute
 from resource_management.core.signal_utils import TerminateStrategy
 from ambari_commons.os_check import OSConst
@@ -56,6 +57,7 @@ SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
 SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
 SMOKEUSER_DEFAULT = 'ambari-qa'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 
 HIVE_CONF_DIR_LEGACY = '/etc/hive/conf.server'
@@ -78,7 +80,7 @@ def get_tokens():
   """
   return (SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,
     HIVE_METASTORE_URIS_KEY, SMOKEUSER_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
-    STACK_ROOT)
+    STACK_NAME, STACK_ROOT)
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def get_tokens():
@@ -175,9 +177,10 @@ def execute(configurations={}, parameters={}, host_name=None):
     bin_dir = HIVE_BIN_DIR_LEGACY
 
 
-    if STACK_ROOT in configurations:
-      hive_conf_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/conf")
-      hive_bin_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/bin")
+    if STACK_NAME in configurations and STACK_ROOT in configurations:
+      stack_root = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
+      hive_conf_dir = stack_root + format("/current/hive-metastore/conf")
+      hive_bin_dir = stack_root + format("/current/hive-metastore/bin")
 
       if os.path.exists(hive_conf_dir):
         conf_dir = hive_conf_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
index 98d1899..e46c896 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
@@ -26,7 +26,7 @@ import subprocess
 
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
-from ambari_commons.os_check import OSConst
+from resource_management.libraries.functions import stack_tools
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from resource_management.core import shell
 from resource_management.core.resources import Execute
@@ -58,6 +58,7 @@ HIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
 HIVE_USER_KEY = '{{hive-env/hive_user}}'
 HIVE_USER_DEFAULT = 'default.smoke.user'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 STACK_ROOT_DEFAULT = Script.get_stack_root()
 
@@ -88,7 +89,7 @@ def get_tokens():
   to build the dictionary passed into execute
   """
   return (SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_PRINCIPAL_KEY, HIVE_PRINCIPAL_KEYTAB_KEY,
-          HIVE_USER_KEY, STACK_ROOT, LLAP_APP_NAME_KEY)
+          HIVE_USER_KEY, STACK_NAME, STACK_ROOT, LLAP_APP_NAME_KEY)
 
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -159,8 +160,11 @@ def execute(configurations={}, parameters={}, host_name=None):
 
 
     start_time = time.time()
-    if STACK_ROOT in configurations:
-      llap_status_cmd = configurations[STACK_ROOT] + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
+    if STACK_NAME in configurations and STACK_ROOT in configurations:
+      stack_root = stack_tools.get_stack_root(configurations[STACK_NAME],
+        configurations[STACK_ROOT])
+
+      llap_status_cmd = stack_root + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
     else:
       llap_status_cmd = STACK_ROOT_DEFAULT + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
index 0e9fe74..54eef18 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
@@ -26,6 +26,7 @@ from resource_management.core.resources import Execute
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions import get_klist_path
+from resource_management.libraries.functions import stack_tools
 from ambari_commons.os_check import OSConst, OSCheck
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from urlparse import urlparse
@@ -66,6 +67,7 @@ USER_PRINCIPAL_DEFAULT = 'oozie@EXAMPLE.COM'
 # default user
 USER_DEFAULT = 'oozie'
 
+STACK_NAME_KEY = '{{cluster-env/stack_name}}'
 STACK_ROOT_KEY = '{{cluster-env/stack_root}}'
 STACK_ROOT_DEFAULT = '/usr/hdp'
 
@@ -86,7 +88,7 @@ def get_tokens():
   to build the dictionary passed into execute
   """
   return (OOZIE_URL_KEY, USER_PRINCIPAL_KEY, SECURITY_ENABLED, USER_KEYTAB_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
-          USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_ROOT_KEY)
+          USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_NAME_KEY, STACK_ROOT_KEY)
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def get_check_command(oozie_url, host_name, configurations):
@@ -158,8 +160,8 @@ def get_check_command(oozie_url, host_name, configurations, parameters, only_kin
 
   # Configure stack root
   stack_root = STACK_ROOT_DEFAULT
-  if STACK_ROOT_KEY in configurations:
-    stack_root = configurations[STACK_ROOT_KEY].lower()
+  if STACK_NAME_KEY in configurations and STACK_ROOT_KEY in configurations:
+    stack_root = stack_tools.get_stack_root(configurations[STACK_NAME_KEY], configurations[STACK_ROOT_KEY]).lower()
 
   # oozie configuration directory using a symlink
   oozie_config_directory = OOZIE_CONF_DIR.replace(STACK_ROOT_PATTERN, stack_root)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
index 4c5834f..f3c6406 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
@@ -23,6 +23,7 @@ import os
 import platform
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
+from resource_management.libraries.functions import stack_tools
 
 DiskInfo = collections.namedtuple('DiskInfo', 'total used free path')
 
@@ -36,6 +37,7 @@ MIN_FREE_SPACE_DEFAULT = 5000000000L
 PERCENT_USED_WARNING_DEFAULT = 50
 PERCENT_USED_CRITICAL_DEFAULT = 80
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 
 def get_tokens():
@@ -43,7 +45,7 @@ def get_tokens():
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
   """
-  return (STACK_ROOT, )
+  return (STACK_NAME, STACK_ROOT)
 
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -64,10 +66,10 @@ def execute(configurations={}, parameters={}, host_name=None):
   if configurations is None:
     return (('UNKNOWN', ['There were no configurations supplied to the script.']))
 
-  if not STACK_ROOT in configurations:
-    return (('STACK_ROOT', ['cluster-env/stack_root is not specified']))
+  if not STACK_NAME in configurations or not STACK_ROOT in configurations:
+    return (('STACK_ROOT', ['cluster-env/stack_name and cluster-env/stack_root are required']))
 
-  path = configurations[STACK_ROOT]
+  path = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
 
   try:
     disk_usage = _get_disk_usage(path)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/host_scripts/alert_version_select.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_version_select.py b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
index 0ce79e7..f54ccad 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_version_select.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
@@ -31,6 +31,7 @@ RESULT_STATE_WARNING = 'WARNING'
 RESULT_STATE_CRITICAL = 'CRITICAL'
 RESULT_STATE_UNKNOWN = 'UNKNOWN'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_TOOLS = '{{cluster-env/stack_tools}}'
 
 
@@ -42,7 +43,7 @@ def get_tokens():
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
   """
-  return (STACK_TOOLS,)
+  return (STACK_NAME, STACK_TOOLS)
 
 
 def execute(configurations={}, parameters={}, host_name=None):
@@ -65,8 +66,10 @@ def execute(configurations={}, parameters={}, host_name=None):
     if STACK_TOOLS not in configurations:
       return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(STACK_TOOLS)])
 
+    stack_name = Script.get_stack_name()
+
     # Of the form,
-    # { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] }
+    # { "HDP" : { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] } }
     stack_tools_str = configurations[STACK_TOOLS]
 
     if stack_tools_str is None:
@@ -75,6 +78,7 @@ def execute(configurations={}, parameters={}, host_name=None):
     distro_select = "unknown-distro-select"
     try:
       stack_tools = json.loads(stack_tools_str)
+      stack_tools = stack_tools[stack_name]
       distro_select = stack_tools["stack_selector"][0]
     except:
       pass
@@ -87,18 +91,18 @@ def execute(configurations={}, parameters={}, host_name=None):
       (code, out, versions) = unsafe_get_stack_versions()
 
       if code == 0:
-        msg.append("Ok. {0}".format(distro_select))
+        msg.append("{0} ".format(distro_select))
         if versions is not None and type(versions) is list and len(versions) > 0:
-          msg.append("Versions: {0}".format(", ".join(versions)))
+          msg.append("reported the following versions: {0}".format(", ".join(versions)))
         return (RESULT_STATE_OK, ["\n".join(msg)])
       else:
-        msg.append("Failed, check dir {0} for unexpected contents.".format(stack_root_dir))
+        msg.append("{0} could not properly read {1}. Check this directory for unexpected contents.".format(distro_select, stack_root_dir))
         if out is not None:
           msg.append(out)
 
         return (RESULT_STATE_CRITICAL, ["\n".join(msg)])
     else:
-      msg.append("Ok. No stack root {0} to check.".format(stack_root_dir))
+      msg.append("No stack root {0} to check.".format(stack_root_dir))
       return (RESULT_STATE_OK, ["\n".join(msg)])
   except Exception, e:
     return (RESULT_STATE_CRITICAL, [e.message])
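
Note: the nested stack_tools payload handled above can be exercised on its own;
the snippet mirrors the json.loads / stack_tools[stack_name] /
["stack_selector"][0] chain from the alert, with the value taken from the HDP
stack_tools.json further below and the stack name hard-coded for illustration.

    import json

    stack_tools_str = ('{"HDP": {"stack_selector": ["hdp-select", '
                       '"/usr/bin/hdp-select", "hdp-select"], '
                       '"conf_selector": ["conf-select", '
                       '"/usr/bin/conf-select", "conf-select"]}}')

    stack_tools = json.loads(stack_tools_str)
    stack_tools = stack_tools["HDP"]          # now keyed by stack name
    distro_select = stack_tools["stack_selector"][0]
    print(distro_select)                      # hdp-select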

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index f7d5de5..e6ec285 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -220,6 +220,18 @@ gpgcheck=0</value>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
+  <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_name</name>
+    <value>HDP</value>
+    <description>The name of the stack.</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
   <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
   <property>
     <name>stack_tools</name>
@@ -252,8 +264,8 @@ gpgcheck=0</value>
   </property>
   <property>
     <name>stack_root</name>
-    <value>/usr/hdp</value>
-    <description>Stack root folder</description>
+    <value>{"HDP":"/usr/hdp"}</value>
+    <description>JSON which defines the stack root by stack name</description>
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>
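
Note: the stack_root change above is the core of letting multiple stack tool
sets co-exist: the value is now a JSON map keyed by stack name instead of a
single path. A two-entry value is shown below for illustration only; the second
stack name is hypothetical and not part of this commit.

    import json

    # "EXAMPLESTACK" is a made-up key used only to show the shape.
    stack_root = '{"HDP": "/usr/hdp", "EXAMPLESTACK": "/usr/examplestack"}'
    roots = json.loads(stack_root)
    print(roots["HDP"])  # /usr/hdp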

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index 878645b..31cf0c8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -1,427 +1,429 @@
 {
-  "stack_features": [
-    {
-      "name": "snappy",
-      "description": "Snappy compressor/decompressor support",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
-    },
-    {
-      "name": "lzo",
-      "description": "LZO libraries support",
-      "min_version": "2.2.1.0"
-    },
-    {
-      "name": "express_upgrade",
-      "description": "Express upgrade support",
-      "min_version": "2.1.0.0"
-    },
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "kafka_acl_migration_support",
-      "description": "ACL migration support",
-      "min_version": "2.3.4.0"
-    },
-    {
-      "name": "secure_zookeeper",
-      "description": "Protect ZNodes with SASL acl in secure clusters",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "datanode_non_root",
-      "description": "DataNode running as non-root support (AMBARI-7615)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "remove_ranger_hdfs_plugin_env",
-      "description": "HDFS removes Ranger env files (AMBARI-14299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger",
-      "description": "Ranger Service support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_tagsync_component",
-      "description": "Ranger Tagsync component support (AMBARI-14383)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix",
-      "description": "Phoenix Service support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "nfs",
-      "description": "NFS support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "tez_for_spark",
-      "description": "Tez dependency for Spark",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "timeline_state_store",
-      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "copy_tarball_to_hdfs",
-      "description": "Copy tarball to HDFS support (AMBARI-12113)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "spark_16plus",
-      "description": "Spark 1.6+",
-      "min_version": "2.4.0.0"
-    },
-    {
-      "name": "spark_thriftserver",
-      "description": "Spark Thrift Server",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "storm_kerberos",
-      "description": "Storm Kerberos support (AMBARI-7570)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "storm_ams",
-      "description": "Storm AMS integration (AMBARI-10710)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "create_kafka_broker_id",
-      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_listeners",
-      "description": "Kafka listeners (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_kerberos",
-      "description": "Kafka Kerberos support (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "pig_on_tez",
-      "description": "Pig on Tez support (AMBARI-7863)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_usersync_non_root",
-      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_audit_db_support",
-      "description": "Ranger Audit to DB support",
-      "min_version": "2.2.0.0",
-      "max_version": "2.4.99.99"
-    },
-    {
-      "name": "accumulo_kerberos_user_auth",
-      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "knox_versioned_data_dir",
-      "description": "Use versioned data dir for Knox (AMBARI-13164)",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "knox_sso_topology",
-      "description": "Knox SSO Topology support (AMBARI-13975)",
-      "min_version": "2.3.8.0"
-    },
-    {
-      "name": "atlas_rolling_upgrade",
-      "description": "Rolling upgrade support for Atlas",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "oozie_admin_user",
-      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_create_hive_tez_configs",
-      "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_setup_shared_lib",
-      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_host_kerberos",
-      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-      "min_version": "2.0.0.0"
-    },
-    {
-      "name": "falcon_extensions",
-      "description": "Falcon Extension",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_upgrade_schema",
-      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server_interactive",
-      "description": "Hive server interactive support (AMBARI-15573)",
-      "min_version": "2.5.0.0"
-     },
-    {
-      "name": "hive_webhcat_specific_configs",
-      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_purge_table",
-      "description": "Hive purge table support (AMBARI-12260)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server2_kerberized_env",
-      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
-      "min_version": "2.2.3.0",
-      "max_version": "2.2.5.0"
-     },
-    {
-      "name": "hive_env_heapsize",
-      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_kms_hsm_support",
-      "description": "Ranger KMS HSM support (AMBARI-15752)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_log4j_support",
-      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kerberos_support",
-      "description": "Ranger Kerberos support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_site_support",
-      "description": "Hive Metastore site support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_usersync_password_jceks",
-      "description": "Saving Ranger Usersync credentials in jceks",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_install_infra_client",
-      "description": "Ambari Infra Service support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "falcon_atlas_support_2_3",
-      "description": "Falcon Atlas integration support for 2.3 stack",
-      "min_version": "2.3.99.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "falcon_atlas_support",
-      "description": "Falcon Atlas integration",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hbase_home_directory",
-      "description": "Hbase home directory in HDFS needed for HBASE backup",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_livy",
-      "description": "Livy as slave component of spark",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_livy2",
-      "description": "Livy as slave component of spark",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "atlas_ranger_plugin_support",
-      "description": "Atlas Ranger plugin support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_conf_dir_in_path",
-      "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
-      "min_version": "2.3.0.0",
-      "max_version": "2.4.99.99"
-    },
-    {
-      "name": "atlas_upgrade_support",
-      "description": "Atlas supports express and rolling upgrades",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_hook_support",
-      "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_pid_support",
-      "description": "Ranger Service support pid generation AMBARI-16756",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kms_pid_support",
-      "description": "Ranger KMS Service support pid generation",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_admin_password_change",
-      "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_setup_db_on_start",
-      "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "storm_metrics_apache_classes",
-      "description": "Metrics sink for Storm that uses Apache class names",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_java_opts_support",
-      "description": "Allow Spark to generate java-opts file",
-      "min_version": "2.2.0.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "atlas_hbase_setup",
-      "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_hive_plugin_jdbc_url",
-      "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "zkfc_version_advertised",
-      "description": "ZKFC advertise version",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix_core_hdfs_site_required",
-      "description": "HDFS and CORE site required for Phoenix",
-      "max_version": "2.5.9.9"
-    },
-    {
-      "name": "ranger_tagsync_ssl_xml_support",
-      "description": "Ranger Tagsync ssl xml support.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "ranger_xml_configuration",
-      "description": "Ranger code base support xml configurations",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_ranger_plugin_support",
-      "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "yarn_ranger_plugin_support",
-      "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_solr_config_support",
-      "description": "Showing Ranger solrconfig.xml on UI",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "hive_interactive_atlas_hook_required",
-      "description": "Registering Atlas Hook for Hive Interactive.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "core_site_for_ranger_plugins",
-      "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "atlas_install_hook_package_support",
-      "description": "Stop installing packages from 2.6",
-      "max_version": "2.5.9.9"
-    },
-    {
-      "name": "atlas_hdfs_site_on_namenode_ha",
-      "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "hive_interactive_ga",
-      "description": "Hive Interactive GA support",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "secure_ranger_ssl_password",
-      "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "ranger_kms_ssl",
-      "description": "Ranger KMS SSL properties in ambari stack",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "nifi_encrypt_config",
-      "description": "Encrypt sensitive properties written to nifi property file",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "toolkit_config_update",
-      "description": "Support separate input and output for toolkit configuration",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "admin_toolkit_support",
-      "description": "Supports the nifi admin toolkit",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "tls_toolkit_san",
-      "description": "Support subject alternative name flag",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "nifi_jaas_conf_create",
-      "description": "Create NIFI jaas configuration when kerberos is enabled",
-      "min_version": "2.6.0.0"
-    }
-  ]
+  "HDP": {
+    "stack_features": [
+      {
+        "name": "snappy",
+        "description": "Snappy compressor/decompressor support",
+        "min_version": "2.0.0.0",
+        "max_version": "2.2.0.0"
+      },
+      {
+        "name": "lzo",
+        "description": "LZO libraries support",
+        "min_version": "2.2.1.0"
+      },
+      {
+        "name": "express_upgrade",
+        "description": "Express upgrade support",
+        "min_version": "2.1.0.0"
+      },
+      {
+        "name": "rolling_upgrade",
+        "description": "Rolling upgrade support",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "kafka_acl_migration_support",
+        "description": "ACL migration support",
+        "min_version": "2.3.4.0"
+      },
+      {
+        "name": "secure_zookeeper",
+        "description": "Protect ZNodes with SASL acl in secure clusters",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "config_versioning",
+        "description": "Configurable versions support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "datanode_non_root",
+        "description": "DataNode running as non-root support (AMBARI-7615)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "remove_ranger_hdfs_plugin_env",
+        "description": "HDFS removes Ranger env files (AMBARI-14299)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger",
+        "description": "Ranger Service support",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_tagsync_component",
+        "description": "Ranger Tagsync component support (AMBARI-14383)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "phoenix",
+        "description": "Phoenix Service support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "nfs",
+        "description": "NFS support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "tez_for_spark",
+        "description": "Tez dependency for Spark",
+        "min_version": "2.2.0.0",
+        "max_version": "2.3.0.0"
+      },
+      {
+        "name": "timeline_state_store",
+        "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "copy_tarball_to_hdfs",
+        "description": "Copy tarball to HDFS support (AMBARI-12113)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "spark_16plus",
+        "description": "Spark 1.6+",
+        "min_version": "2.4.0.0"
+      },
+      {
+        "name": "spark_thriftserver",
+        "description": "Spark Thrift Server",
+        "min_version": "2.3.2.0"
+      },
+      {
+        "name": "storm_kerberos",
+        "description": "Storm Kerberos support (AMBARI-7570)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "storm_ams",
+        "description": "Storm AMS integration (AMBARI-10710)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "create_kafka_broker_id",
+        "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+        "min_version": "2.2.0.0",
+        "max_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_listeners",
+        "description": "Kafka listeners (AMBARI-10984)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_kerberos",
+        "description": "Kafka Kerberos support (AMBARI-10984)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "pig_on_tez",
+        "description": "Pig on Tez support (AMBARI-7863)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_usersync_non_root",
+        "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger_audit_db_support",
+        "description": "Ranger Audit to DB support",
+        "min_version": "2.2.0.0",
+        "max_version": "2.4.99.99"
+      },
+      {
+        "name": "accumulo_kerberos_user_auth",
+        "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "knox_versioned_data_dir",
+        "description": "Use versioned data dir for Knox (AMBARI-13164)",
+        "min_version": "2.3.2.0"
+      },
+      {
+        "name": "knox_sso_topology",
+        "description": "Knox SSO Topology support (AMBARI-13975)",
+        "min_version": "2.3.8.0"
+      },
+      {
+        "name": "atlas_rolling_upgrade",
+        "description": "Rolling upgrade support for Atlas",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "oozie_admin_user",
+        "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_create_hive_tez_configs",
+        "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_setup_shared_lib",
+        "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_host_kerberos",
+        "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+        "min_version": "2.0.0.0"
+      },
+      {
+        "name": "falcon_extensions",
+        "description": "Falcon Extension",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_metastore_upgrade_schema",
+        "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_server_interactive",
+        "description": "Hive server interactive support (AMBARI-15573)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_webhcat_specific_configs",
+        "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_purge_table",
+        "description": "Hive purge table support (AMBARI-12260)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_server2_kerberized_env",
+        "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+        "min_version": "2.2.3.0",
+        "max_version": "2.2.5.0"
+      },
+      {
+        "name": "hive_env_heapsize",
+        "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_kms_hsm_support",
+        "description": "Ranger KMS HSM support (AMBARI-15752)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_log4j_support",
+        "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_kerberos_support",
+        "description": "Ranger Kerberos support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_metastore_site_support",
+        "description": "Hive Metastore site support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_usersync_password_jceks",
+        "description": "Saving Ranger Usersync credentials in jceks",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_install_infra_client",
+        "description": "Ambari Infra Service support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "falcon_atlas_support_2_3",
+        "description": "Falcon Atlas integration support for 2.3 stack",
+        "min_version": "2.3.99.0",
+        "max_version": "2.4.0.0"
+      },
+      {
+        "name": "falcon_atlas_support",
+        "description": "Falcon Atlas integration",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hbase_home_directory",
+        "description": "Hbase home directory in HDFS needed for HBASE backup",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_livy",
+        "description": "Livy as slave component of spark",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_livy2",
+        "description": "Livy as slave component of spark",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "atlas_ranger_plugin_support",
+        "description": "Atlas Ranger plugin support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "atlas_conf_dir_in_path",
+        "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
+        "min_version": "2.3.0.0",
+        "max_version": "2.4.99.99"
+      },
+      {
+        "name": "atlas_upgrade_support",
+        "description": "Atlas supports express and rolling upgrades",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "atlas_hook_support",
+        "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_pid_support",
+        "description": "Ranger Service support pid generation AMBARI-16756",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_kms_pid_support",
+        "description": "Ranger KMS Service support pid generation",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_admin_password_change",
+        "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_setup_db_on_start",
+        "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "storm_metrics_apache_classes",
+        "description": "Metrics sink for Storm that uses Apache class names",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_java_opts_support",
+        "description": "Allow Spark to generate java-opts file",
+        "min_version": "2.2.0.0",
+        "max_version": "2.4.0.0"
+      },
+      {
+        "name": "atlas_hbase_setup",
+        "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_hive_plugin_jdbc_url",
+        "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "zkfc_version_advertised",
+        "description": "ZKFC advertise version",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "phoenix_core_hdfs_site_required",
+        "description": "HDFS and CORE site required for Phoenix",
+        "max_version": "2.5.9.9"
+      },
+      {
+        "name": "ranger_tagsync_ssl_xml_support",
+        "description": "Ranger Tagsync ssl xml support.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "ranger_xml_configuration",
+        "description": "Ranger code base support xml configurations",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_ranger_plugin_support",
+        "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "yarn_ranger_plugin_support",
+        "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger_solr_config_support",
+        "description": "Showing Ranger solrconfig.xml on UI",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "hive_interactive_atlas_hook_required",
+        "description": "Registering Atlas Hook for Hive Interactive.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "core_site_for_ranger_plugins",
+        "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "atlas_install_hook_package_support",
+        "description": "Stop installing packages from 2.6",
+        "max_version": "2.5.9.9"
+      },
+      {
+        "name": "atlas_hdfs_site_on_namenode_ha",
+        "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "hive_interactive_ga",
+        "description": "Hive Interactive GA support",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "secure_ranger_ssl_password",
+        "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "ranger_kms_ssl",
+        "description": "Ranger KMS SSL properties in ambari stack",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "nifi_encrypt_config",
+        "description": "Encrypt sensitive properties written to nifi property file",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "toolkit_config_update",
+        "description": "Support separate input and output for toolkit configuration",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "admin_toolkit_support",
+        "description": "Supports the nifi admin toolkit",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "tls_toolkit_san",
+        "description": "Support subject alternative name flag",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "nifi_jaas_conf_create",
+        "description": "Create NIFI jaas configuration when kerberos is enabled",
+        "min_version": "2.6.0.0"
+      }
+    ]
+  }
 }
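
Note: with stack_features.json nested under the stack name, a consumer first
selects the stack's feature list and then compares versions. The helper below
is a minimal sketch, not the resource_management implementation; it assumes
simple dotted version ordering and treats max_version as an exclusive upper
bound.

    import json

    def feature_supported(features_json, stack_name, feature, version):
      # Pick the feature list for this stack under the new nested layout.
      features = json.loads(features_json)[stack_name]["stack_features"]
      as_tuple = lambda s: [int(p) for p in s.split(".")]
      for f in features:
        if f["name"] != feature:
          continue
        if "min_version" in f and as_tuple(version) < as_tuple(f["min_version"]):
          return False
        if "max_version" in f and as_tuple(version) >= as_tuple(f["max_version"]):
          return False
        return True
      return False

    features = ('{"HDP": {"stack_features": '
                '[{"name": "rolling_upgrade", "min_version": "2.2.0.0"}]}}')
    print(feature_supported(features, "HDP", "rolling_upgrade", "2.5.0.0"))  # True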

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
index d1aab4b..c515d57 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
@@ -1,4 +1,14 @@
 {
-  "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
-  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}
\ No newline at end of file
+  "HDP": {
+    "stack_selector": [
+      "hdp-select",
+      "/usr/bin/hdp-select",
+      "hdp-select"
+    ],
+    "conf_selector": [
+      "conf-select",
+      "/usr/bin/conf-select",
+      "conf-select"
+    ]
+  }
+}
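
Note: scripts that read stack_tools now need to index by stack name first.
Since older blueprints or cluster-env exports may still carry the flat form, a
tolerant reader could accept both shapes; the fallback below is an assumption
for illustration, not behaviour introduced by this commit.

    import json

    def get_stack_selector_path(stack_tools_json, stack_name):
      tools = json.loads(stack_tools_json)
      if stack_name in tools:            # new nested layout
        tools = tools[stack_name]
      # The flat legacy layout already has "stack_selector" at the top level.
      return tools["stack_selector"][1]  # e.g. /usr/bin/hdp-select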

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
index 7df00ee..f19ac52 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
@@ -20,6 +20,18 @@
  */
 -->
 <configuration>
+  <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_name</name>
+    <value>PERF</value>
+    <description>The name of the stack.</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 
   <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
   <property>
@@ -55,8 +67,8 @@
 
   <property>
     <name>stack_root</name>
-    <value>/usr/perf</value>
-    <description>Stack root folder</description>
+    <value>{"PERF":"/usr/perf"}</value>
+    <description>JSON which defines the stack root by stack name</description>  
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
index e9e0ed2..839e8e6 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
@@ -1,19 +1,21 @@
 {
-  "stack_features": [
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "1.0.0.0"
-    },
-    {
-      "name": "secure_zookeeper",
-      "description": "Protect ZNodes with SASL acl in secure clusters",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "1.0.0.0"
-    }
-  ]
-}
+  "PERF": {
+    "stack_features": [
+      {
+        "name": "rolling_upgrade",
+        "description": "Rolling upgrade support",
+        "min_version": "1.0.0.0"
+      },
+      {
+        "name": "secure_zookeeper",
+        "description": "Protect ZNodes with SASL acl in secure clusters",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "config_versioning",
+        "description": "Configurable versions support",
+        "min_version": "1.0.0.0"
+      }
+    ]
+  }
+}
\ No newline at end of file


[23/36] ambari git commit: Revert "AMBARI-21427. Assigning hosts concurrently to same config group may fail with "org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist". (stoader)"

Posted by lp...@apache.org.
Revert "AMBARI-21427. Assigning hosts concurrently to same config group may fail with "org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist". (stoader)"


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/70cf77e4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/70cf77e4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/70cf77e4

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 70cf77e4087840e89fab50a741d36bf8747ba416
Parents: 15dd999
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Mon Jul 10 23:11:38 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Mon Jul 10 23:19:34 2017 +0300

----------------------------------------------------------------------
 .../ambari/server/topology/AmbariContext.java   | 81 +++++---------------
 1 file changed, 19 insertions(+), 62 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/70cf77e4/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index dee0e6c..106d7c8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -30,7 +30,6 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Lock;
 
 import javax.annotation.Nullable;
 import javax.inject.Inject;
@@ -70,11 +69,9 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
-import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.utils.RetryHelper;
 import org.slf4j.Logger;
@@ -82,8 +79,6 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.Striped;
-import com.google.inject.Provider;
 
 
 /**
@@ -104,12 +99,6 @@ public class AmbariContext {
   @Inject
   ConfigFactory configFactory;
 
-  /**
-   * Used for getting configuration property values from stack and services.
-   */
-  @Inject
-  private Provider<ConfigHelper> configHelper;
-
   private static AmbariManagementController controller;
   private static ClusterController clusterController;
   //todo: task id's.  Use existing mechanism for getting next task id sequence
@@ -123,16 +112,6 @@ public class AmbariContext {
 
   private final static Logger LOG = LoggerFactory.getLogger(AmbariContext.class);
 
-
-  /**
-   * When config groups are created using Blueprints these are created when
-   * hosts join a hostgroup and are added to the corresponding config group.
-   * Since hosts join in parallel there might be a race condition in creating
-   * the config group a host is to be added to. Thus we need to synchronize
-   * the creation of config groups with the same name.
-   */
-  private Striped<Lock> configGroupCreateLock = Striped.lazyWeakLock(1);
-
   public boolean isClusterKerberosEnabled(long clusterId) {
     Cluster cluster;
     try {
@@ -188,10 +167,9 @@ public class AmbariContext {
 
   public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType, String repoVersion) {
     Stack stack = topology.getBlueprint().getStack();
-    StackId stackId = new StackId(stack.getName(), stack.getVersion());
 
     createAmbariClusterResource(clusterName, stack.getName(), stack.getVersion(), securityType, repoVersion);
-    createAmbariServiceAndComponentResources(topology, clusterName, stackId, repoVersion);
+    createAmbariServiceAndComponentResources(topology, clusterName);
   }
 
   public void createAmbariClusterResource(String clusterName, String stackName, String stackVersion, SecurityType securityType, String repoVersion) {
@@ -218,8 +196,7 @@ public class AmbariContext {
     }
   }
 
-  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName,
-      StackId stackId, String repositoryVersion) {
+  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName) {
     Collection<String> services = topology.getBlueprint().getServices();
 
     try {
@@ -228,13 +205,11 @@ public class AmbariContext {
     } catch (AmbariException e) {
       throw new RuntimeException("Failed to persist service and component resources: " + e, e);
     }
-    Set<ServiceRequest> serviceRequests = new HashSet<>();
-    Set<ServiceComponentRequest> componentRequests = new HashSet<>();
+    Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
+    Set<ServiceComponentRequest> componentRequests = new HashSet<ServiceComponentRequest>();
     for (String service : services) {
       String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service);
-      serviceRequests.add(new ServiceRequest(clusterName, service, stackId.getStackId(),
-          repositoryVersion, null, credentialStoreEnabled));
-
+      serviceRequests.add(new ServiceRequest(clusterName, service, null, credentialStoreEnabled));
       for (String component : topology.getBlueprint().getComponents(service)) {
         String recoveryEnabled = topology.getBlueprint().getRecoveryEnabled(service, component);
         componentRequests.add(new ServiceComponentRequest(clusterName, service, component, null, recoveryEnabled));
@@ -248,14 +223,14 @@ public class AmbariContext {
     }
     // set all services state to INSTALLED->STARTED
     // this is required so the user can start failed services at the service level
-    Map<String, Object> installProps = new HashMap<>();
+    Map<String, Object> installProps = new HashMap<String, Object>();
     installProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "INSTALLED");
     installProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    Map<String, Object> startProps = new HashMap<>();
+    Map<String, Object> startProps = new HashMap<String, Object>();
     startProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "STARTED");
     startProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    Predicate predicate = new EqualsPredicate<>(
-      ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
+    Predicate predicate = new EqualsPredicate<String>(
+        ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
     try {
       getServiceResourceProvider().updateResources(
           new RequestImpl(null, Collections.singleton(installProps), null, null), predicate);
@@ -287,9 +262,9 @@ public class AmbariContext {
     }
     String clusterName = cluster.getClusterName();
 
-    Map<String, Object> properties = new HashMap<>();
+    Map<String, Object> properties = new HashMap<String, Object>();
     properties.put(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    properties.put(HostResourceProvider.HOST_HOST_NAME_PROPERTY_ID, hostName);
+    properties.put(HostResourceProvider.HOST_NAME_PROPERTY_ID, hostName);
     properties.put(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID, host.getRackInfo());
 
     try {
@@ -300,7 +275,7 @@ public class AmbariContext {
           hostName, e.toString()), e);
     }
 
-    final Set<ServiceComponentHostRequest> requests = new HashSet<>();
+    final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
 
     for (Map.Entry<String, Collection<String>> entry : components.entrySet()) {
       String service = entry.getKey();
@@ -353,17 +328,11 @@ public class AmbariContext {
   }
 
   public void registerHostWithConfigGroup(final String hostName, final ClusterTopology topology, final String groupName) {
-    String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
-
-    Lock configGroupLock = configGroupCreateLock.get(qualifiedGroupName);
-
     try {
-      configGroupLock.lock();
-
       boolean hostAdded = RetryHelper.executeWithRetry(new Callable<Boolean>() {
         @Override
         public Boolean call() throws Exception {
-          return addHostToExistingConfigGroups(hostName, topology, qualifiedGroupName);
+          return addHostToExistingConfigGroups(hostName, topology, groupName);
         }
       });
       if (!hostAdded) {
@@ -373,9 +342,6 @@ public class AmbariContext {
       LOG.error("Unable to register config group for host: ", e);
       throw new RuntimeException("Unable to register config group for host: " + hostName);
     }
-    finally {
-      configGroupLock.unlock();
-    }
   }
 
   public RequestStatusResponse installHost(String hostName, String clusterName, Collection<String> skipInstallForComponents, Collection<String> dontSkipInstallForComponents, boolean skipFailure) {
@@ -583,7 +549,7 @@ public class AmbariContext {
   /**
    * Add the new host to an existing config group.
    */
-  private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String configGroupName) {
+  private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String groupName) {
     boolean addedHost = false;
     Clusters clusters;
     Cluster cluster;
@@ -597,8 +563,9 @@ public class AmbariContext {
     // I don't know of a method to get config group by name
     //todo: add a method to get config group by name
     Map<Long, ConfigGroup> configGroups = cluster.getConfigGroups();
+    String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
     for (ConfigGroup group : configGroups.values()) {
-      if (group.getName().equals(configGroupName)) {
+      if (group.getName().equals(qualifiedGroupName)) {
         try {
           Host host = clusters.getHost(hostName);
           addedHost = true;
@@ -622,7 +589,7 @@ public class AmbariContext {
    * and the hosts associated with the host group are assigned to the config group.
    */
   private void createConfigGroupsAndRegisterHost(ClusterTopology topology, String groupName) throws AmbariException {
-    Map<String, Map<String, Config>> groupConfigs = new HashMap<>();
+    Map<String, Map<String, Config>> groupConfigs = new HashMap<String, Map<String, Config>>();
     Stack stack = topology.getBlueprint().getStack();
 
     // get the host-group config with cluster creation template overrides
@@ -641,7 +608,7 @@ public class AmbariContext {
       //todo: attributes
       Map<String, Config> serviceConfigs = groupConfigs.get(service);
       if (serviceConfigs == null) {
-        serviceConfigs = new HashMap<>();
+        serviceConfigs = new HashMap<String, Config>();
         groupConfigs.put(service, serviceConfigs);
       }
       serviceConfigs.put(type, config);
@@ -702,16 +669,6 @@ public class AmbariContext {
     return String.format("%s:%s", bpName, hostGroupName);
   }
 
-  /**
-   * Gets an instance of {@link ConfigHelper} for classes which are not
-   * dependency injected.
-   *
-   * @return a {@link ConfigHelper} instance.
-   */
-  public ConfigHelper getConfigHelper() {
-    return configHelper.get();
-  }
-
   private synchronized HostResourceProvider getHostResourceProvider() {
     if (hostResourceProvider == null) {
       hostResourceProvider = (HostResourceProvider)
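
Note on what the revert above removes: the reverted patch wrapped config-group
registration in a per-group-name lock (Guava's Striped.lazyWeakLock) so that
hosts joining the same host group in parallel would not race to create the same
config group. The sketch below restates that idea in Python purely for
illustration; add_host and create_group are hypothetical callbacks, not Ambari
APIs.

    import threading

    _locks = {}
    _locks_guard = threading.Lock()

    def lock_for(group_name):
      # One lock per qualified config group name.
      with _locks_guard:
        return _locks.setdefault(group_name, threading.Lock())

    def register_host_with_config_group(host, group_name, add_host, create_group):
      with lock_for(group_name):
        # Without the lock, two hosts can both see "no group yet" and both
        # attempt the create, which is the race the revert reintroduces.
        if not add_host(host, group_name):
          create_group(group_name, [host])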


[13/36] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by lp...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
index 05cb78a..cafbede 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
@@ -1,55 +1,55 @@
 {
     "localComponents": [
-        "SECONDARY_NAMENODE", 
-        "HDFS_CLIENT", 
-        "DATANODE", 
-        "NAMENODE", 
-        "ZOOKEEPER_SERVER", 
-        "ZOOKEEPER_CLIENT", 
-        "RANGER_USERSYNC", 
-        "RANGER_ADMIN", 
+        "SECONDARY_NAMENODE",
+        "HDFS_CLIENT",
+        "DATANODE",
+        "NAMENODE",
+        "ZOOKEEPER_SERVER",
+        "ZOOKEEPER_CLIENT",
+        "RANGER_USERSYNC",
+        "RANGER_ADMIN",
         "RANGER_TAGSYNC",
         "RANGER_KMS_SERVER"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
-        "ranger-tagsync-site": {}, 
-        "zoo.cfg": {}, 
-        "hadoop-policy": {}, 
-        "hdfs-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
+        },
+        "ranger-tagsync-site": {},
+        "zoo.cfg": {},
+        "hadoop-policy": {},
+        "hdfs-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
-        "usersync-properties": {}, 
-        "zookeeper-env": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
+        "usersync-properties": {},
+        "zookeeper-env": {},
         "cluster-env": {},
         "dbks-site": {},
         "kms-env": {},
@@ -60,744 +60,744 @@
         "ranger-kms-site": {},
         "ranger-kms-policymgr-ssl": {},
         "ranger-kms-audit": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "9-1", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER_KMS", 
-    "role": "RANGER_KMS_SERVER", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 9, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "9-1",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER_KMS",
+    "role": "RANGER_KMS_SERVER",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 9,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "c1", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 64, 
-    "roleParams": {}, 
+    },
+    "clusterName": "c1",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 64,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466427664617"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1466427664621"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466427664617"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1466427664621"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466427664621"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466427664621"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466427664621"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1466427664621"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1466427664621"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466427664617"
-        }, 
+        },
         "core-site": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466427664621"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466427664621"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466427664621"
-        }, 
+        },
         "ranger-hdfs-security": {
             "tag": "version1466427664617"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466427664621"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1"
-        }, 
+        },
         "cluster-env": {
             "tag": "version1"
         },
         "dbks-site": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "kms-env": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "kms-log4j": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "kms-properties": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "kms-site": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "ranger-kms-security": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "ranger-kms-site": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "ranger-kms-policymgr-ssl": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "ranger-kms-audit": {
-            "tag": "version1"            
+            "tag": "version1"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
-        "current_version": "2.5.0.0-777", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
-        "agent_stack_retry_count": "5", 
-        "stack_version": "2.5", 
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
+        "current_version": "2.5.0.0-777",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+        "agent_stack_retry_count": "5",
+        "stack_version": "2.5",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
         "java_home": "/usr/jdk64/jdk1.7.0_45",
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
-        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-777\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-776\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
-        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]", 
-        "db_name": "ambari", 
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-777\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-776\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
+        "db_name": "ambari",
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
         "jce_name": "UnlimitedJCEPolicyJDK7.zip",
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_usersync.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "version": "2.5.0.0-777", 
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_usersync.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "version": "2.5.0.0-777",
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 1, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 1,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.125.4"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "ranger_kms_server_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
             "xasecure.audit.destination.solr.zookeepers": "c6401.ambari.apache.org:2181/ranger_audits",
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
-            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", 
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
             "xasecure.audit.destination.hdfs": "true",
-            "xasecure.audit.destination.solr": "true", 
-            "xasecure.audit.provider.summary.enabled": "false", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.destination.solr": "true",
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
-            "ranger.admin.kerberos.cookie.domain": "", 
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.admin.kerberos.cookie.domain": "",
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
             "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
-            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ranger_audits", 
-            "ranger.lookup.kerberos.principal": "", 
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
-            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
+            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ranger_audits",
+            "ranger.lookup.kerberos.principal": "",
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
             "atlas.kafka.bootstrap.servers": "localhost:6667"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!qLEQwP24KVlWY", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!qLEQwP24KVlWY",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
-        }, 
+        },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:50010", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.datanode.http.address": "0.0.0.0:50075", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:50010",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.heartbeat.interval": "3",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.datanode.http.address": "0.0.0.0:50075",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
-            "ranger.tagsync.atlas.to.ranger.service.mapping": "", 
-            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.kerberos.principal": "", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.atlas.custom.resource.mappers": "", 
-            "ranger.tagsync.kerberos.keytab": "", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.atlas.to.ranger.service.mapping": "",
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.kerberos.principal": "",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.atlas.custom.resource.mappers": "",
+            "ranger.tagsync.kerberos.keytab": "",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "core-site": {
-            "hadoop.proxyuser.root.hosts": "*", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "fs.trash.interval": "360", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "hadoop.security.authentication": "simple", 
-            "hadoop.proxyuser.root.groups": "*", 
-            "ipc.client.connection.maxidletime": "30000", 
-            "hadoop.security.key.provider.path": "", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.security.authorization": "false", 
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "ipc.server.tcpnodelay": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "DEFAULT", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "ipc.client.idlethreshold": "8000", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "hadoop.proxyuser.root.hosts": "*",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "fs.trash.interval": "360",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "hadoop.security.authentication": "simple",
+            "hadoop.proxyuser.root.groups": "*",
+            "ipc.client.connection.maxidletime": "30000",
+            "hadoop.security.key.provider.path": "",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.security.authorization": "false",
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "ipc.server.tcpnodelay": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "DEFAULT",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "ipc.client.idlethreshold": "8000",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
             "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
-        }, 
+        },
         "ssl-server": {
-            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", 
-            "ssl.server.keystore.keypassword": "bigdata", 
-            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", 
-            "ssl.server.keystore.password": "bigdata", 
-            "ssl.server.truststore.password": "bigdata", 
-            "ssl.server.truststore.type": "jks", 
-            "ssl.server.keystore.type": "jks", 
+            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
+            "ssl.server.keystore.keypassword": "bigdata",
+            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
+            "ssl.server.keystore.password": "bigdata",
+            "ssl.server.truststore.password": "bigdata",
+            "ssl.server.truststore.type": "jks",
+            "ssl.server.keystore.type": "jks",
             "ssl.server.truststore.reload.interval": "10000"
-        }, 
-        "ranger-site": {}, 
+        },
+        "ranger-site": {},
         "admin-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
 ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] 
 %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
-        }, 
+        },
         "tagsync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
-        }, 
+        },
         "ranger-hdfs-security": {
-            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
-            "ranger.plugin.hdfs.service.name": "{{repo_name}}", 
-            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
-            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", 
-            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", 
-            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", 
+            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+            "ranger.plugin.hdfs.service.name": "{{repo_name}}",
+            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
+            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
+            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
+            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
             "xasecure.add-hadoop-authorization": "true"
-        }, 
-        "usersync-properties": {}, 
+        },
+        "usersync-properties": {},
         "zookeeper-env": {
-            "zk_log_dir": "/var/log/zookeeper", 
-            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
-            "zk_server_heapsize": "1024m", 
-            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper",
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk

<TRUNCATED>

[34/36] ambari git commit: AMBARI-21454. hive20 and wfmanager views fail to build due to missing module babel-plugin-transform-es2015-block-scoping (Vijay Kumar via smohanty)

Posted by lp...@apache.org.
AMBARI-21454. hive20 and wfmanager views fail to build due to missing module babel-plugin-transform-es2015-block-scoping (Vijay Kumar via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/eb1adcbf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/eb1adcbf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/eb1adcbf

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: eb1adcbff32fb9440f288ccaddc997297eb8e4fb
Parents: f27f3af
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Wed Jul 12 16:30:49 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Wed Jul 12 16:30:49 2017 -0700

----------------------------------------------------------------------
 contrib/views/hive20/src/main/resources/ui/package.json    | 1 +
 contrib/views/wfmanager/src/main/resources/ui/package.json | 1 +
 2 files changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/eb1adcbf/contrib/views/hive20/src/main/resources/ui/package.json
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/package.json b/contrib/views/hive20/src/main/resources/ui/package.json
index a409111..eea8cf9 100644
--- a/contrib/views/hive20/src/main/resources/ui/package.json
+++ b/contrib/views/hive20/src/main/resources/ui/package.json
@@ -24,6 +24,7 @@
     "bootstrap-daterangepicker": "2.1.24",
     "bower": "^1.7.9",
     "broccoli-asset-rev": "^2.4.2",
+    "babel-plugin-transform-es2015-block-scoping": "^6.24.1",
     "ember-ajax": "^2.0.1",
     "ember-cli": "2.7.0",
     "ember-cli-app-version": "^1.0.0",

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb1adcbf/contrib/views/wfmanager/src/main/resources/ui/package.json
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/package.json b/contrib/views/wfmanager/src/main/resources/ui/package.json
index 25ed6c1..69f43c8 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/package.json
+++ b/contrib/views/wfmanager/src/main/resources/ui/package.json
@@ -21,6 +21,7 @@
   "devDependencies": {
     "bower": "^1.7.7",
     "broccoli-asset-rev": "^2.2.0",
+    "babel-plugin-transform-es2015-block-scoping": "^6.24.1",
     "ember-ajax": "0.7.1",
     "ember-cli": "2.3.0",
     "ember-cli-app-version": "^1.0.0",


[07/36] ambari git commit: AMBARI-21339 logviewer is started along with nimbus if supervisor is not running on the same machine

Posted by lp...@apache.org.
AMBARI-21339 logviewer is started along with nimbus if supervisor is not running on the same machine


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1939dabc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1939dabc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1939dabc

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 1939dabcd7f6eeff3bb93e4d6f718b8a32351bd2
Parents: 6832ed9
Author: lpuskas <lp...@apache.org>
Authored: Thu Jun 29 17:11:59 2017 +0200
Committer: lpuskas <la...@sequenceiq.com>
Committed: Fri Jul 7 11:23:12 2017 +0200

----------------------------------------------------------------------
 .../STORM/0.9.1/package/scripts/nimbus.py       |  8 ++-
 .../stacks/2.1/STORM/test_storm_nimbus.py       | 60 +++++++++++++++++++-
 .../stacks/2.1/configs/default-storm-start.json | 14 +++++
 .../test/python/stacks/2.1/configs/default.json | 13 +++++
 .../stacks/2.1/configs/secured-storm-start.json | 13 +++++
 .../test/python/stacks/2.1/configs/secured.json | 15 ++++-
 6 files changed, 119 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
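The nimbus.py change below manages the Storm logviewer from the nimbus script only when no SUPERVISOR is co-located on the same host (a supervisor already manages its own logviewer). A minimal sketch of that guard, assuming the command JSON exposes the host's components as a localComponents list (as in the updated test configs); service_fn here is a stand-in for the service() helper used in the diff:

    # Sketch only: `service_fn` stands in for the service() helper used in
    # nimbus.py, and `local_components` for params.config['localComponents'].
    def manage_logviewer(local_components, service_fn, action):
        # Only manage the logviewer from nimbus if no SUPERVISOR runs on this host.
        if "SUPERVISOR" not in local_components:
            service_fn("logviewer", action=action)

    # Example: a nimbus-only host also gets its logviewer started/stopped.
    manage_logviewer(["NIMBUS", "ZOOKEEPER_SERVER"],
                     lambda name, action: print(name, action), "start")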


http://git-wip-us.apache.org/repos/asf/ambari/blob/1939dabc/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
index 360af5d..126ae78 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
@@ -70,12 +70,18 @@ class NimbusDefault(Nimbus):
     setup_ranger_storm(upgrade_type=upgrade_type)
     service("nimbus", action="start")
 
+    if "SUPERVISOR" not in params.config['localComponents']:
+      service("logviewer", action="start")
+
 
   def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     service("nimbus", action="stop")
 
+    if "SUPERVISOR" not in params.config['localComponents']:
+      service("logviewer", action="stop")
+
 
   def status(self, env):
     import status_params
@@ -85,7 +91,7 @@ class NimbusDefault(Nimbus):
   def get_log_folder(self):
     import params
     return params.log_dir
-  
+
   def get_user(self):
     import params
     return params.storm_user

http://git-wip-us.apache.org/repos/asf/ambari/blob/1939dabc/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
index 35f057c..fd25126 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
@@ -58,6 +58,15 @@ class TestStormNimbus(TestStormBase):
         owner = 'storm',
         group = 'hadoop',
     )
+    self.assertResourceCalled('Execute', 'source /etc/storm/conf/storm-env.sh ; export PATH=$JAVA_HOME/bin:$PATH ; storm logviewer > /var/log/storm/logviewer.out 2>&1 &\n echo $! > /var/run/storm/logviewer.pid',
+        path = ['/usr/bin'],
+        user = 'storm',
+        not_if = "ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1'",
+    )
+    self.assertResourceCalled('File', '/var/run/storm/logviewer.pid',
+        owner = 'storm',
+        group = 'hadoop',
+    )
     self.assertNoMoreResources()
 
   def test_start_with_metrics_collector(self):
@@ -99,6 +108,15 @@ class TestStormNimbus(TestStormBase):
         owner = 'storm',
         group = 'hadoop',
     )
+    self.assertResourceCalled('Execute', 'source /etc/storm/conf/storm-env.sh ; export PATH=$JAVA_HOME/bin:$PATH ; storm logviewer > /var/log/storm/logviewer.out 2>&1 &\n echo $! > /var/run/storm/logviewer.pid',
+      path = ['/usr/bin'],
+      user = 'storm',
+      not_if = "ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1'",
+    )
+    self.assertResourceCalled('File', '/var/run/storm/logviewer.pid',
+      owner = 'storm',
+      group = 'hadoop',
+    )
     self.assertNoMoreResources()
 
   def test_start_with_metrics_collector_modern(self):
@@ -141,12 +159,21 @@ class TestStormNimbus(TestStormBase):
         owner = 'storm',
         group = 'hadoop',
     )
+    self.assertResourceCalled('Execute', 'source /etc/storm/conf/storm-env.sh ; export PATH=$JAVA_HOME/bin:$PATH ; storm logviewer > /var/log/storm/logviewer.out 2>&1 &\n echo $! > /var/run/storm/logviewer.pid',
+      path = ['/usr/bin'],
+      user = 'storm',
+      not_if = "ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1'",
+    )
+    self.assertResourceCalled('File', '/var/run/storm/logviewer.pid',
+      owner = 'storm',
+      group = 'hadoop',
+    )
     self.assertNoMoreResources()
 
   @patch("os.path.exists")
   def test_stop_default(self, path_exists_mock):
     # Bool for the pid file
-    path_exists_mock.side_effect = [True]
+    path_exists_mock.side_effect = [True, True]
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
                        classname = "Nimbus",
                        command = "stop",
@@ -164,6 +191,16 @@ class TestStormNimbus(TestStormBase):
     self.assertResourceCalled('File', '/var/run/storm/nimbus.pid',
         action = ['delete'],
     )
+    self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
+      not_if = "! (ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1')",
+    )
+    self.assertResourceCalled('Execute', "ambari-sudo.sh kill -9 123",
+      not_if = "sleep 2; ! (ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1') || sleep 20; ! (ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1')",
+      ignore_failures = True,
+    )
+    self.assertResourceCalled('File', '/var/run/storm/logviewer.pid',
+      action = ['delete'],
+    )
     self.assertNoMoreResources()
 
   def test_configure_secured(self):
@@ -196,12 +233,21 @@ class TestStormNimbus(TestStormBase):
         owner = 'storm',
         group = 'hadoop',
     )
+    self.assertResourceCalled('Execute', 'source /etc/storm/conf/storm-env.sh ; export PATH=$JAVA_HOME/bin:$PATH ; storm logviewer > /var/log/storm/logviewer.out 2>&1 &\n echo $! > /var/run/storm/logviewer.pid',
+        path = ['/usr/bin'],
+        user = 'storm',
+        not_if = "ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1'",
+    )
+    self.assertResourceCalled('File', '/var/run/storm/logviewer.pid',
+        owner = 'storm',
+        group = 'hadoop',
+    )
     self.assertNoMoreResources()
 
   @patch("os.path.exists")
   def test_stop_secured(self, path_exists_mock):
     # Bool for the pid file
-    path_exists_mock.side_effect = [True]
+    path_exists_mock.side_effect = [True, True]
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
                        classname = "Nimbus",
                        command = "stop",
@@ -219,6 +265,16 @@ class TestStormNimbus(TestStormBase):
     self.assertResourceCalled('File', '/var/run/storm/nimbus.pid',
         action = ['delete'],
     )
+    self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
+      not_if = "! (ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1')",
+    )
+    self.assertResourceCalled('Execute', "ambari-sudo.sh kill -9 123",
+      not_if = "sleep 2; ! (ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1') || sleep 20; ! (ambari-sudo.sh su storm -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/storm/logviewer.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/logviewer.pid` >/dev/null 2>&1')",
+      ignore_failures = True,
+    )
+    self.assertResourceCalled('File', '/var/run/storm/logviewer.pid',
+      action = ['delete'],
+    )
     self.assertNoMoreResources()
 
   def test_pre_upgrade_restart(self):

http://git-wip-us.apache.org/repos/asf/ambari/blob/1939dabc/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json b/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
index 27cb63e..05330a0 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
@@ -1,4 +1,18 @@
 {
+    "localComponents": [
+        "APP_TIMELINE_SERVER",
+        "TEZ_CLIENT",
+        "DATANODE",
+        "HDFS_CLIENT",
+        "ZOOKEEPER_SERVER",
+        "RESOURCEMANAGER",
+        "MAPREDUCE2_CLIENT",
+        "YARN_CLIENT",
+        "HISTORYSERVER",
+        "ZOOKEEPER_CLIENT",
+        "NAMENODE"
+    ],
+
     "configuration_attributes": {
         "storm-site": {},
         "hdfs-site": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/1939dabc/ambari-server/src/test/python/stacks/2.1/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default.json b/ambari-server/src/test/python/stacks/2.1/configs/default.json
index e04e1eb..536074e 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default.json
@@ -1,4 +1,17 @@
 {
+    "localComponents": [
+        "APP_TIMELINE_SERVER",
+        "TEZ_CLIENT",
+        "DATANODE",
+        "HDFS_CLIENT",
+        "ZOOKEEPER_SERVER",
+        "RESOURCEMANAGER",
+        "MAPREDUCE2_CLIENT",
+        "YARN_CLIENT",
+        "HISTORYSERVER",
+        "ZOOKEEPER_CLIENT",
+        "NAMENODE"
+    ],
     "roleCommand": "SERVICE_CHECK", 
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1939dabc/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
index 1b027b7..6d7fdd1 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
@@ -1,4 +1,17 @@
 {
+    "localComponents": [
+        "APP_TIMELINE_SERVER",
+        "TEZ_CLIENT",
+        "DATANODE",
+        "HDFS_CLIENT",
+        "ZOOKEEPER_SERVER",
+        "RESOURCEMANAGER",
+        "MAPREDUCE2_CLIENT",
+        "YARN_CLIENT",
+        "HISTORYSERVER",
+        "ZOOKEEPER_CLIENT",
+        "NAMENODE"
+    ],
     "configuration_attributes": {
         "storm-site": {}, 
         "hdfs-site": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/1939dabc/ambari-server/src/test/python/stacks/2.1/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured.json b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
index 61b359c..e2c22be 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
@@ -1,5 +1,18 @@
 {
-    "roleCommand": "INSTALL", 
+  "localComponents": [
+    "APP_TIMELINE_SERVER",
+    "TEZ_CLIENT",
+    "DATANODE",
+    "HDFS_CLIENT",
+    "ZOOKEEPER_SERVER",
+    "RESOURCEMANAGER",
+    "MAPREDUCE2_CLIENT",
+    "YARN_CLIENT",
+    "HISTORYSERVER",
+    "ZOOKEEPER_CLIENT",
+    "NAMENODE"
+  ],
+    "roleCommand": "INSTALL",
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {


[36/36] ambari git commit: AMBARI-21426. Apply ZEPPELIN-2698 related changes in Ambari (prabhjyotsingh via Venkata Sairam)

Posted by lp...@apache.org.
AMBARI-21426. Apply ZEPPELIN-2698 related changes in Ambari (prabhjyotsingh via Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/63186bf3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/63186bf3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/63186bf3

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 63186bf3eb1bf0501e0c2450f85467a0bc6adf12
Parents: 853a5d4
Author: Venkata Sairam <ve...@gmail.com>
Authored: Fri Jul 14 12:35:26 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Fri Jul 14 12:35:52 2017 +0530

----------------------------------------------------------------------
 .../ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml              | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/63186bf3/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
index 4032b2c..80ac2bb 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
@@ -95,7 +95,7 @@ export ZEPPELIN_INTP_CLASSPATH_OVERRIDES="{{external_dependency_conf}}"
 ## Kerberos ticket refresh setting
 ##
 export KINIT_FAIL_THRESHOLD=5
-export LAUNCH_KERBEROS_REFRESH_INTERVAL=1d
+export KERBEROS_REFRESH_INTERVAL=1d
 
 ## Use provided spark installation ##
 ## defining SPARK_HOME makes Zeppelin run spark interpreter process using spark-submit


[30/36] ambari git commit: AMBARI-21444. Hive warehouse fixes. (vbrodetskyi)

Posted by lp...@apache.org.
AMBARI-21444. Hive warehouse fixes. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/31b9d777
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/31b9d777
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/31b9d777

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 31b9d7774b22f59a4d7120c9836c73a5216fd529
Parents: 383b8c7
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Wed Jul 12 15:35:53 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Wed Jul 12 15:35:53 2017 +0300

----------------------------------------------------------------------
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |  3 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |  2 ++
 .../services/HIVE/configuration/hive-site.xml   | 35 ++++++++++++++++++++
 .../stacks/2.0.6/HIVE/test_hive_server.py       |  2 ++
 4 files changed, 41 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
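Besides tightening the warehouse directory's group and mode, params_linux.py now reads the mode from an optional hive-site property and converts the octal string into the integer that the HdfsResource call expects. A small illustration of that conversion, assuming the same "0777" default used in the patch (the property name custom.hive.warehouse.mode comes straight from the diff):

    # Permission strings such as "0777" are octal text; int(value, 8) yields
    # the numeric mode (511 decimal == 0o777) that is passed along to HDFS.
    def warehouse_mode(configured_value=None, default="0777"):
        return int(configured_value if configured_value is not None else default, 8)

    assert warehouse_mode() == 0o777          # 511
    assert warehouse_mode("0750") == 0o750    # a stricter custom mode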


http://git-wip-us.apache.org/repos/asf/ambari/blob/31b9d777/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
index 36725c3..8e176b6 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
@@ -224,7 +224,8 @@ def setup_hiveserver2():
                          type="directory",
                           action="create_on_execute",
                           owner=params.hive_user,
-                          mode=0777
+                          group=params.user_group,
+                          mode=params.hive_apps_whs_mode
     )
   else:
     Logger.info(format("Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."))

http://git-wip-us.apache.org/repos/asf/ambari/blob/31b9d777/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 078076a..21b3d8b 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -505,6 +505,8 @@ hive_env_sh_template = config['configurations']['hive-env']['content']
 
 hive_hdfs_user_dir = format("/user/{hive_user}")
 hive_hdfs_user_mode = 0755
+#Parameter for custom warehouse directory permissions. Permissions are in octal format and need to be converted to decimal
+hive_apps_whs_mode = int(default('/configurations/hive-site/custom.hive.warehouse.mode', '0777'), 8)
 hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
 whs_dir_protocol = urlparse(hive_apps_whs_dir).scheme
 hive_exec_scratchdir = config['configurations']['hive-site']["hive.exec.scratchdir"]

http://git-wip-us.apache.org/repos/asf/ambari/blob/31b9d777/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 0000000..a07c16f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<configuration supports_final="true">
+  <property>
+    <name>hive.warehouse.subdir.inherit.perms</name>
+    <value>true</value>
+    <description>Set this to true if table directories should inherit the permissions of the warehouse or database directory instead of being created with permissions derived from dfs umask
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.start.cleanup.scratchdir</name>
+    <value>false</value>
+    <description>To cleanup the hive scratchdir while starting the hive server.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/31b9d777/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index ae2ec86..fc6d14e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -491,6 +491,7 @@ class TestHiveServer(RMFTestCase):
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
+        group = 'hadoop',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/hive',
@@ -703,6 +704,7 @@ class TestHiveServer(RMFTestCase):
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        group = 'hadoop',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/hive',


[21/36] ambari git commit: AMBARI-21210 ADDENDUM Add ability to Log Search to test whether a log entry is parseable (mgergely)

Posted by lp...@apache.org.
AMBARI-21210 ADDENDUM Add ability to Log Search to test whether a log entry is parseable (mgergely)

Change-Id: Icb847dc5cc9b6f63eb02cffe8046c78be0e585dc


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c0882898
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c0882898
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c0882898

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: c0882898deed4b6f0ecbd6f12cd935dc6b75cfdf
Parents: 3c9f125
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Mon Jul 10 14:45:41 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Mon Jul 10 14:45:41 2017 +0200

----------------------------------------------------------------------
 .../org/apache/ambari/logfeeder/common/LogEntryParseTester.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c0882898/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java
index 97bc3a2..5356159 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java
@@ -76,7 +76,7 @@ public class LogEntryParseTester {
     ConfigHandler configHandler = new ConfigHandler();
     Input input = configHandler.getTestInput(inputConfig, logId);
     final Map<String, Object> result = new HashMap<>();
-    input.init();
+    input.getFirstFilter().init();
     input.addOutput(new Output() {
       @Override
       public void write(String block, InputMarker inputMarker) throws Exception {


[15/36] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by lp...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
index a1d930c..fb77531 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
@@ -1,150 +1,150 @@
 {
     "localComponents": [
-        "NAMENODE", 
-        "SECONDARY_NAMENODE", 
-        "ZOOKEEPER_SERVER", 
-        "DATANODE", 
-        "HDFS_CLIENT", 
-        "ZOOKEEPER_CLIENT", 
-        "RANGER_USERSYNC", 
-        "RANGER_ADMIN", 
+        "NAMENODE",
+        "SECONDARY_NAMENODE",
+        "ZOOKEEPER_SERVER",
+        "DATANODE",
+        "HDFS_CLIENT",
+        "ZOOKEEPER_CLIENT",
+        "RANGER_USERSYNC",
+        "RANGER_ADMIN",
         "RANGER_TAGSYNC",
         "LOGSEARCH_SOLR",
         "LOGSEARCH_SOLR_CLIENT"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
-        "ranger-tagsync-site": {}, 
-        "zoo.cfg": {}, 
-        "hadoop-policy": {}, 
-        "hdfs-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
+        },
+        "ranger-tagsync-site": {},
+        "zoo.cfg": {},
+        "hadoop-policy": {},
+        "hdfs-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
-        "usersync-properties": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
+        "usersync-properties": {},
         "zookeeper-env": {},
         "infra-solr-env": {},
         "infra-solr-client-log4j": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "11-0", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER", 
-    "role": "RANGER_ADMIN", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 11, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "11-0",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER",
+    "role": "RANGER_ADMIN",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 11,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "c1", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 31, 
-    "roleParams": {}, 
+    },
+    "clusterName": "c1",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 31,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "core-site": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1"
         },
@@ -157,492 +157,492 @@
         "cluster-env": {
             "tag": "version1"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
-        "package_version": "2_5_0_0_*", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
+        "package_version": "2_5_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
-        "current_version": "2.5.0.0-801", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
-        "agent_stack_retry_count": "5", 
-        "stack_version": "2.5", 
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
-        "java_home": "/usr/jdk64/jdk1.7.0_45", 
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
-        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
-        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]", 
-        "db_name": "ambari", 
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "jce_policy-8.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
+        "current_version": "2.5.0.0-801",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+        "agent_stack_retry_count": "5",
+        "stack_version": "2.5",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
+        "db_name": "ambari",
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "jce_policy-8.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_admin.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "version": "2.5.0.0-801", 
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_admin.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "version": "2.5.0.0-801",
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 0, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 0,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "infra_solr_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
-            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
             "xasecure.audit.destination.hdfs": "true",
-            "xasecure.audit.destination.solr": "false", 
+            "xasecure.audit.destination.solr": "false",
             "xasecure.audit.provider.summary.enabled": "false",
             "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
             "ranger.admin.kerberos.cookie.domain": "",
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks", 
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
             "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr",
-            "ranger.lookup.kerberos.principal": "", 
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
-            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.lookup.kerberos.principal": "",
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
             "atlas.kafka.bootstrap.servers": "localhost:6667"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
             "ranger_solr_replication_factor": "1",
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
-        }, 
+        },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:50010", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.datanode.http.address": "0.0.0.0:50075", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:50010",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.heartbeat.interval": "3",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.datanode.http.address": "0.0.0.0:50075",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
-            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
-            "ranger.tagsync.source.atlasrest.username": "", 
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "", 
-            "ranger.tagsync.kerberos.keytab": "", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.source.atlasrest.username": "",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "",
+            "ranger.tagsync.kerberos.keytab": "",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "core-site": {
-            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "fs.trash.interval": "360", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "hadoop.security.authentication": "simple", 
-            "hadoop.proxyuser.root.groups": "*", 
-            "ipc.client.connection.maxidletime": "30000", 
-            "hadoop.security.key.provider.path": "", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.security.authorization": "false", 
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "ipc.server.tcpnodelay": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "DEFAULT", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "ipc.client.idlethreshold": "8000", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "fs.trash.interval": "360",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "hadoop.security.authentication": "simple",
+            "hadoop.proxyuser.root.groups": "*",
+            "ipc.client.connection.maxidletime": "30000",
+            "hadoop.security.key.provider.path": "",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.security.authorization": "false",
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "ipc.server.tcpnodelay": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "DEFAULT",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "ipc.client.idlethreshold": "8000",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
             "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
-        }, 
+        },
         "ssl-server": {
-            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", 
-            "ssl.server.keystore.keypassword": "bigdata", 
-            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", 
-            "ssl.server.keystore.password": "bigdata", 
-            "ssl.server.truststore.password": "bigdata", 
-            "ssl.server.truststore.type": "jks", 
-            "ssl.server.keystore.type": "jks", 
+            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
+            "ssl.server.keystore.keypassword": "bigdata",
+            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
+            "ssl.server.keystore.password": "bigdata",
+            "ssl.server.truststore.password": "bigdata",
+            "ssl.server.truststore.type": "jks",
+            "ssl.server.keystore.type": "jks",
             "ssl.server.truststore.reload.interval": "10000"
-        }, 
-        "ranger-site": {}, 
+        },
+        "ranger-site": {},
         "admin-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
 ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] 
 %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
-        }, 
+        },
         "tagsync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
-        }, 
+        },
         "ranger-hdfs-security": {
-            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
-            "ranger.plugin.hdfs.service.name": "{{repo_name}}", 
-            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
-            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", 
-            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", 
-            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", 
+            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+            "ranger.plugin.hdfs.service.name": "{{repo_name}}",
+            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
+            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
+            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
+            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
             "xasecure.add-hadoop-authorization": "true"
-        }, 
-        "usersync-properties": {}, 
+        },
+        "usersync-properties": {},
         "zookeeper-env": {
-            "zk_log_dir": "/var/log/zookeeper", 
-            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
-            "zk_server_heapsize": "1024m", 
-            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper",
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+            "zk_server_heapsize": "1024m",
+            "zk_pid_dir": "/var/run/zookeeper",
             "zk_user": "zookeeper"
         },
         "infra-solr-env": {
@@ -651,7 +651,7 @@
             "infra_solr_kerberos_name_rules": "DEFAULT",
             "infra_solr_user": "infra-solr",
             "infra_solr_maxmem": "1024",
-            "content": "#!/bin/bash\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LI

<TRUNCATED>

[28/36] ambari git commit: AMBARI-21392. Cleanup relevant Kerberos identities when a service is removed (amagyar)

Posted by lp...@apache.org.
AMBARI-21392. Cleanup relevant Kerberos identities when a service is removed (amagyar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e767aa44
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e767aa44
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e767aa44

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: e767aa44d872bab9ac0c416684f80b2b662347e5
Parents: 0b397cd
Author: Attila Magyar <am...@hortonworks.com>
Authored: Tue Jul 11 20:10:12 2017 +0200
Committer: Attila Magyar <am...@hortonworks.com>
Committed: Tue Jul 11 20:10:12 2017 +0200

----------------------------------------------------------------------
 .../controller/DeleteIdentityHandler.java       |  77 ++++++++--
 .../server/controller/KerberosHelper.java       |   2 +-
 .../server/controller/KerberosHelperImpl.java   |   5 +-
 .../utilities/KerberosIdentityCleaner.java      |  88 +++--------
 .../utilities/RemovableIdentities.java          | 145 +++++++++++++++++++
 .../controller/utilities/UsedIdentities.java    | 101 +++++++++++++
 .../ServiceComponentUninstalledEvent.java       |   6 +
 .../server/events/ServiceRemovedEvent.java      |  29 ++--
 .../ambari/server/orm/dao/ClusterDAO.java       |  15 ++
 .../orm/entities/ClusterConfigEntity.java       |   3 +
 .../org/apache/ambari/server/state/Cluster.java |   7 +
 .../apache/ambari/server/state/ServiceImpl.java |  14 +-
 .../server/state/cluster/ClusterImpl.java       |   9 ++
 .../AbstractKerberosDescriptorContainer.java    |  12 ++
 .../kerberos/KerberosComponentDescriptor.java   |  15 --
 .../kerberos/KerberosIdentityDescriptor.java    |  14 +-
 .../utilities/KerberosIdentityCleanerTest.java  | 102 +++++++++++--
 .../server/orm/dao/ServiceConfigDAOTest.java    |  12 ++
 18 files changed, 520 insertions(+), 136 deletions(-)
----------------------------------------------------------------------
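
The cleanup below is wired through Guava-style @Subscribe event handlers (see the KerberosIdentityCleaner changes in this commit). What follows is only a rough, self-contained sketch of that publish/subscribe pattern; the names DemoServiceRemovedEvent and DemoCleaner are invented for the sketch and are not part of this commit.

    import com.google.common.eventbus.EventBus;
    import com.google.common.eventbus.Subscribe;

    public class EventBusSketch {
      // Stand-in for a "service removed" event; carries only the service name.
      static class DemoServiceRemovedEvent {
        final String serviceName;
        DemoServiceRemovedEvent(String serviceName) { this.serviceName = serviceName; }
      }

      // Stand-in for the cleaner: the bus invokes this method for each posted event.
      static class DemoCleaner {
        @Subscribe
        public void serviceRemoved(DemoServiceRemovedEvent event) {
          // In the commit below, this is roughly where identity cleanup is triggered.
          System.out.println("Cleaning up identities for " + event.serviceName);
        }
      }

      public static void main(String[] args) {
        EventBus bus = new EventBus();
        bus.register(new DemoCleaner());                 // subscribe the listener
        bus.post(new DemoServiceRemovedEvent("KAFKA"));  // delivers to serviceRemoved(...)
      }
    }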


http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
index aa098b6..3329e76 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
@@ -17,12 +17,13 @@
  */
 package org.apache.ambari.server.controller;
 
-import static com.google.common.collect.Sets.newHashSet;
+import static java.util.Collections.singleton;
+import static java.util.stream.Collectors.toSet;
 import static org.apache.ambari.server.controller.KerberosHelperImpl.BASE_LOG_DIR;
 
 import java.io.File;
+import java.lang.reflect.Type;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -45,10 +46,15 @@ import org.apache.ambari.server.serveraction.kerberos.KDCType;
 import org.apache.ambari.server.serveraction.kerberos.KerberosOperationHandler;
 import org.apache.ambari.server.serveraction.kerberos.KerberosServerAction;
 import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostServerActionEvent;
 import org.apache.ambari.server.utils.StageUtils;
 
+import com.google.gson.reflect.TypeToken;
+
+
 /**
  * I delete kerberos identities (principals and keytabs) of a given component.
  */
@@ -78,7 +84,7 @@ class DeleteIdentityHandler {
     if (manageIdentities) {
       addPrepareDeleteIdentity(cluster, hostParamsJson, event, commandParameters, stageContainer);
       addDestroyPrincipals(cluster, hostParamsJson, event, commandParameters, stageContainer);
-      addDeleteKeytab(cluster, newHashSet(commandParameters.component.getHostName()), hostParamsJson, commandParameters, stageContainer);
+      addDeleteKeytab(cluster, commandParameters.getAffectedHostNames(), hostParamsJson, commandParameters, stageContainer);
     }
     addFinalize(cluster, hostParamsJson, event, stageContainer, commandParameters);
   }
@@ -172,15 +178,15 @@ class DeleteIdentityHandler {
 
 
   public static class CommandParams {
-    private final Component component;
-    private final List<String> identities;
+    private final List<Component> components;
+    private final Set<String> identities;
     private final String authName;
     private final File dataDirectory;
     private final String defaultRealm;
     private final KDCType kdcType;
 
-    public CommandParams(Component component, List<String> identities, String authName, File dataDirectory, String defaultRealm, KDCType kdcType) {
-      this.component = component;
+    public CommandParams(List<Component> components, Set<String> identities, String authName, File dataDirectory, String defaultRealm, KDCType kdcType) {
+      this.components = components;
       this.identities = identities;
       this.authName = authName;
       this.dataDirectory = dataDirectory;
@@ -194,11 +200,15 @@ class DeleteIdentityHandler {
       commandParameters.put(KerberosServerAction.DEFAULT_REALM, defaultRealm);
       commandParameters.put(KerberosServerAction.KDC_TYPE, kdcType.name());
       commandParameters.put(KerberosServerAction.IDENTITY_FILTER, StageUtils.getGson().toJson(identities));
-      commandParameters.put(KerberosServerAction.COMPONENT_FILTER, StageUtils.getGson().toJson(component));
+      commandParameters.put(KerberosServerAction.COMPONENT_FILTER, StageUtils.getGson().toJson(components));
       commandParameters.put(KerberosServerAction.DATA_DIRECTORY, dataDirectory.getAbsolutePath());
       return commandParameters;
     }
 
+    public Set<String> getAffectedHostNames() {
+      return components.stream().map(Component::getHostName).collect(toSet());
+    }
+
     public String asJson() {
       return StageUtils.getGson().toJson(asMap());
     }
@@ -211,22 +221,57 @@ class DeleteIdentityHandler {
       processServiceComponents(
         getCluster(),
         kerberosDescriptor,
-        Collections.singletonList(getComponentFilter()),
+        componentFilter(),
         getIdentityFilter(),
         dataDirectory(),
-        calculateConfig(kerberosDescriptor),
-        new HashMap<String, Map<String, String>>(),
+        calculateConfig(kerberosDescriptor, serviceNames()),
+        new HashMap<>(),
         false,
-        new HashMap<String, Set<String>>());
+        new HashMap<>());
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", actionLog.getStdOut(), actionLog.getStdErr());
     }
 
-    protected Component getComponentFilter() {
-      return StageUtils.getGson().fromJson(getCommandParameterValue(KerberosServerAction.COMPONENT_FILTER), Component.class);
+    private Set<String> serviceNames() {
+      return componentFilter().stream().map(component -> component.getServiceName()).collect(toSet());
+    }
+
+    private List<Component> componentFilter() {
+      Type jsonType = new TypeToken<List<Component>>() {}.getType();
+      return StageUtils.getGson().fromJson(getCommandParameterValue(KerberosServerAction.COMPONENT_FILTER), jsonType);
+    }
+
+    /**
+     * Cleaning identities is asynchronous; the service and its configuration may already have been deleted at this point.
+     * We extend the actual config with the properties of the service's most recently deleted configuration.
+     * The service configuration is needed because principal names may contain placeholder variables that are resolved from it.
+     */
+    private Map<String, Map<String, String>> calculateConfig(KerberosDescriptor kerberosDescriptor, Set<String> serviceNames) throws AmbariException {
+      Map<String, Map<String, String>> actualConfig = getKerberosHelper().calculateConfigurations(getCluster(), null, kerberosDescriptor.getProperties());
+      extendWithDeletedConfigOfService(actualConfig, serviceNames);
+      return actualConfig;
+    }
+
+    private void extendWithDeletedConfigOfService(Map<String, Map<String, String>> configToBeExtended, Set<String> serviceNames) throws AmbariException {
+      Set<String> deletedConfigTypes = serviceNames.stream()
+        .flatMap(serviceName -> configTypesOfService(serviceName).stream())
+        .collect(toSet());
+      for (Config deletedConfig : getCluster().getLatestConfigsWithTypes(deletedConfigTypes)) {
+        configToBeExtended.put(deletedConfig.getType(), deletedConfig.getProperties());
+      }
     }
 
-    private Map<String, Map<String, String>> calculateConfig(KerberosDescriptor kerberosDescriptor) throws AmbariException {
-      return getKerberosHelper().calculateConfigurations(getCluster(), null, kerberosDescriptor.getProperties());
+    private Set<String> configTypesOfService(String serviceName) {
+      try {
+        StackId stackId = getCluster().getCurrentStackVersion();
+        StackServiceRequest stackServiceRequest = new StackServiceRequest(stackId.getStackName(), stackId.getStackVersion(), serviceName);
+        return AmbariServer.getController().getStackServices(singleton(stackServiceRequest)).stream()
+          .findFirst()
+          .orElseThrow(() -> new IllegalArgumentException("Could not find stack service " + serviceName))
+          .getConfigTypes()
+          .keySet();
+      } catch (AmbariException e) {
+        throw new RuntimeException(e);
+      }
     }
 
     private String dataDirectory() {
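
The componentFilter() change above deserializes a JSON array into a typed List via Gson's TypeToken, since List<Component> loses its element type to erasure at runtime. Below is a minimal, standalone sketch of that pattern; the Item class and the sample JSON are invented for illustration and are not part of this commit.

    import java.lang.reflect.Type;
    import java.util.List;

    import com.google.gson.Gson;
    import com.google.gson.reflect.TypeToken;

    public class TypeTokenSketch {
      // Hypothetical element type standing in for Component.
      static class Item {
        String name;
      }

      public static void main(String[] args) {
        String json = "[{\"name\":\"a\"},{\"name\":\"b\"}]";
        // The anonymous TypeToken subclass preserves the List<Item> type argument.
        Type listType = new TypeToken<List<Item>>() {}.getType();
        List<Item> items = new Gson().fromJson(json, listType);
        System.out.println(items.size() + " items, first = " + items.get(0).name); // 2 items, first = a
      }
    }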

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
index cc0c048..3819863 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
@@ -233,7 +233,7 @@ public interface KerberosHelper {
                                          RequestStageContainer requestStageContainer, Boolean manageIdentities)
       throws AmbariException, KerberosOperationException;
 
-  void deleteIdentity(Cluster cluster, Component component, List<String> identities) throws AmbariException, KerberosOperationException;
+  void deleteIdentities(Cluster cluster, List<Component> components, Set<String> identities) throws AmbariException, KerberosOperationException;
 
   /**
    * Updates the relevant configurations for the components specified in the service filter.

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index b30f8f6..e5b7afd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -301,17 +301,18 @@ public class KerberosHelperImpl implements KerberosHelper {
    * Deletes the kerberos identities of the given component, even if the component is already deleted.
    */
   @Override
-  public void deleteIdentity(Cluster cluster, Component component, List<String> identities) throws AmbariException, KerberosOperationException {
+  public void deleteIdentities(Cluster cluster, List<Component> components, Set<String> identities) throws AmbariException, KerberosOperationException {
     if (identities.isEmpty()) {
       return;
     }
+    LOG.info("Deleting identities: ", identities);
     KerberosDetails kerberosDetails = getKerberosDetails(cluster, null);
     validateKDCCredentials(kerberosDetails, cluster);
     File dataDirectory = createTemporaryDirectory();
     RoleCommandOrder roleCommandOrder = ambariManagementController.getRoleCommandOrder(cluster);
     DeleteIdentityHandler handler = new DeleteIdentityHandler(customCommandExecutionHelper, configuration.getDefaultServerTaskTimeout(), stageFactory, ambariManagementController);
     DeleteIdentityHandler.CommandParams commandParameters = new DeleteIdentityHandler.CommandParams(
-      component,
+      components,
       identities,
       ambariManagementController.getAuthName(),
       dataDirectory,

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
index 0a8462f..7ec4a6e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
@@ -17,26 +17,12 @@
  */
 package org.apache.ambari.server.controller.utilities;
 
-import static org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptor.nullToEmpty;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.events.ServiceComponentUninstalledEvent;
+import org.apache.ambari.server.events.ServiceRemovedEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
-import org.apache.ambari.server.serveraction.kerberos.Component;
 import org.apache.ambari.server.serveraction.kerberos.KerberosMissingAdminCredentialsException;
-import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -69,67 +55,29 @@ public class KerberosIdentityCleaner {
   @Subscribe
   public void componentRemoved(ServiceComponentUninstalledEvent event) throws KerberosMissingAdminCredentialsException {
     try {
-      Cluster cluster = clusters.getCluster(event.getClusterId());
-      if (cluster.getSecurityType() != SecurityType.KERBEROS) {
-        return;
-      }
-      KerberosComponentDescriptor descriptor = componentDescriptor(cluster, event.getServiceName(), event.getComponentName());
-      if (descriptor == null) {
-        LOG.info("No kerberos descriptor for {}", event);
-        return;
-      }
-      List<String> identitiesToRemove = identityNames(skipSharedIdentities(descriptor.getIdentitiesSkipReferences(), cluster, event));
-      LOG.info("Deleting identities {} after an event {}",  identitiesToRemove, event);
-      kerberosHelper.deleteIdentity(cluster, new Component(event.getHostName(), event.getServiceName(), event.getComponentName()), identitiesToRemove);
+      LOG.info("Removing identities after {}", event);
+      RemovableIdentities
+        .ofComponent(clusters.getCluster(event.getClusterId()), event, kerberosHelper)
+        .remove(kerberosHelper);
     } catch (Exception e) {
       LOG.error("Error while deleting kerberos identity after an event: " + event, e);
     }
   }
 
-  private KerberosComponentDescriptor componentDescriptor(Cluster cluster, String serviceName, String componentName) throws AmbariException {
-    KerberosServiceDescriptor serviceDescriptor = kerberosHelper.getKerberosDescriptor(cluster).getService(serviceName);
-    return serviceDescriptor == null ? null : serviceDescriptor.getComponent(componentName);
-  }
-
-  private List<String> identityNames(List<KerberosIdentityDescriptor> identities) {
-    List<String> result = new ArrayList<>();
-    for (KerberosIdentityDescriptor each : identities) { result.add(each.getName()); }
-    return result;
-  }
-
-  private List<KerberosIdentityDescriptor> skipSharedIdentities(List<KerberosIdentityDescriptor> candidates, Cluster cluster, ServiceComponentUninstalledEvent event) throws AmbariException {
-    List<KerberosIdentityDescriptor> activeIdentities = activeIdentities(cluster, kerberosHelper.getKerberosDescriptor(cluster), event);
-    List<KerberosIdentityDescriptor> result = new ArrayList<>();
-    for (KerberosIdentityDescriptor candidate : candidates) {
-      if (!candidate.isShared(activeIdentities)) {
-        result.add(candidate);
-      } else {
-        LOG.debug("Skip removing shared identity: {}", candidate.getName());
-      }
-    }
-    return result;
-  }
-
-  private List<KerberosIdentityDescriptor> activeIdentities(Cluster cluster, KerberosDescriptor root, ServiceComponentUninstalledEvent event) {
-    List<KerberosIdentityDescriptor> result = new ArrayList<>();
-    result.addAll(nullToEmpty(root.getIdentities()));
-    for (Map.Entry<String, Service> serviceEntry : cluster.getServices().entrySet()) {
-      KerberosServiceDescriptor serviceDescriptor = root.getService(serviceEntry.getKey());
-      if (serviceDescriptor == null) {
-        continue;
-      }
-      result.addAll(nullToEmpty(serviceDescriptor.getIdentities()));
-      for (String componentName : serviceEntry.getValue().getServiceComponents().keySet()) {
-        if (!sameComponent(event, componentName, serviceEntry.getKey())) {
-          result.addAll(serviceDescriptor.getComponentIdentities(componentName));
-        }
-      }
+  /**
+   * Removes kerberos identities (principals and keytabs) after a service has been uninstalled.
+   * Keeps an identity if either its principal or its keytab is still used by another service.
+   */
+  @Subscribe
+  public void serviceRemoved(ServiceRemovedEvent event) {
+    try {
+      LOG.info("Removing identities after {}", event);
+      RemovableIdentities
+        .ofService(clusters.getCluster(event.getClusterId()), event, kerberosHelper)
+        .remove(kerberosHelper);
+    } catch (Exception e) {
+      LOG.error("Error while deleting kerberos identity after an event: " + event, e);
     }
-    return result;
-  }
-
-  private boolean sameComponent(ServiceComponentUninstalledEvent event, String componentName, String serviceName) {
-    return event.getServiceName().equals(serviceName) && event.getComponentName().equals(componentName);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
new file mode 100644
index 0000000..d4bb501
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.utilities;
+
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toSet;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.controller.utilities.UsedIdentities.ComponentExclude;
+import org.apache.ambari.server.controller.utilities.UsedIdentities.ServiceExclude;
+import org.apache.ambari.server.events.ServiceComponentUninstalledEvent;
+import org.apache.ambari.server.events.ServiceRemovedEvent;
+import org.apache.ambari.server.serveraction.kerberos.Component;
+import org.apache.ambari.server.serveraction.kerberos.KerberosOperationException;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
+
+/**
+ * I represent a group of kerberos identities which are to be deleted after a service or a component has been removed.
+ * My instances provide methods for removing the candidates, excluding those that are still used by other components or services.
+ */
+public class RemovableIdentities {
+  private final List<KerberosIdentityDescriptor> candidateIdentities;
+  private final UsedIdentities usedIdentities;
+  private final Cluster cluster;
+  private final List<Component> components;
+
+  /**
+   * Populate the identities with the identities of the removed service and its components
+   */
+  public static RemovableIdentities ofService(Cluster cluster, ServiceRemovedEvent event, KerberosHelper kerberosHelper) throws AmbariException {
+    if (cluster.getSecurityType() != SecurityType.KERBEROS) {
+      return RemovableIdentities.none();
+    }
+    KerberosServiceDescriptor serviceDescriptor = kerberosHelper.getKerberosDescriptor(cluster).getService(event.getServiceName());
+    if (serviceDescriptor == null) {
+      return RemovableIdentities.none();
+    }
+    UsedIdentities usedIdentities = UsedIdentities.populate(cluster, excludeService(event.getServiceName()), ComponentExclude.NONE, kerberosHelper);
+    return new RemovableIdentities(
+      serviceDescriptor.getIdentitiesSkipReferences(),
+      usedIdentities,
+      cluster,
+      event.getComponents());
+  }
+
+  /**
+   * Populate the identities with the identities of the removed component
+   */
+  public static RemovableIdentities ofComponent(Cluster cluster, ServiceComponentUninstalledEvent event, KerberosHelper kerberosHelper) throws AmbariException {
+    if (cluster.getSecurityType() != SecurityType.KERBEROS) {
+      return RemovableIdentities.none();
+    }
+    KerberosServiceDescriptor serviceDescriptor = kerberosHelper.getKerberosDescriptor(cluster).getService(event.getServiceName());
+    if (serviceDescriptor == null) {
+      return RemovableIdentities.none();
+    }
+    UsedIdentities usedIdentities = UsedIdentities.populate(
+      cluster,
+      ServiceExclude.NONE,
+      excludeComponent(event.getServiceName(), event.getComponentName(), event.getHostName()),
+      kerberosHelper);
+    return new RemovableIdentities(
+      componentIdentities(singletonList(event.getComponentName()), serviceDescriptor),
+      usedIdentities,
+      cluster,
+      singletonList(event.getComponent()));
+  }
+
+  /**
+   * Populates the identities with an empty list
+   */
+  public static RemovableIdentities none() throws AmbariException {
+    return new RemovableIdentities(emptyList(), UsedIdentities.none(), null, null);
+  }
+
+  private static ServiceExclude excludeService(String excludedServiceName) {
+    return serviceName -> excludedServiceName.equals(serviceName);
+  }
+
+  private static ComponentExclude excludeComponent(String excludedServiceName, String excludedComponentName, String excludedHostName) {
+    return (serviceName, componentName, hosts) -> excludedServiceName.equals(serviceName)
+      && excludedComponentName.equals(componentName)
+      && hostNames(hosts).equals(singletonList(excludedHostName));
+  }
+
+  private static List<String> hostNames(Collection<ServiceComponentHost> hosts) {
+    return hosts.stream().map(ServiceComponentHost::getHostName).collect(toList());
+  }
+
+  private static List<KerberosIdentityDescriptor> componentIdentities(List<String> componentNames, KerberosServiceDescriptor serviceDescriptor) throws AmbariException {
+    return componentNames.stream()
+      .map(componentName -> serviceDescriptor.getComponent(componentName))
+      .filter(Objects::nonNull)
+      .flatMap(componentDescriptor -> componentDescriptor.getIdentitiesSkipReferences().stream())
+      .collect(toList());
+  }
+
+  private RemovableIdentities(List<KerberosIdentityDescriptor> candidateIdentities, UsedIdentities usedIdentities, Cluster cluster, List<Component> components) {
+    this.candidateIdentities = candidateIdentities;
+    this.usedIdentities = usedIdentities;
+    this.cluster = cluster;
+    this.components = components;
+  }
+
+  /**
+   * Remove all identities which are not used by other services or components
+   */
+  public void remove(KerberosHelper kerberosHelper) throws AmbariException, KerberosOperationException {
+    Set<String> identitiesToRemove = skipUsed().stream().map(KerberosIdentityDescriptor::getName).collect(toSet());
+    if (!identitiesToRemove.isEmpty()) {
+      kerberosHelper.deleteIdentities(cluster, components, identitiesToRemove);
+    }
+  }
+
+  private List<KerberosIdentityDescriptor> skipUsed() throws AmbariException {
+    return candidateIdentities.stream().filter(each -> !usedIdentities.contains(each)).collect(toList());
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/UsedIdentities.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/UsedIdentities.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/UsedIdentities.java
new file mode 100644
index 0000000..46f5642
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/UsedIdentities.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.utilities;
+
+import static java.util.Collections.emptyList;
+import static java.util.stream.Collectors.toList;
+import static org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptor.nullToEmpty;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
+
+/**
+ * I represent a group of identities that are still used by at least one non-excluded component or service
+ */
+public class UsedIdentities {
+  private final List<KerberosIdentityDescriptor> used;
+
+  public static UsedIdentities none() throws AmbariException {
+    return new UsedIdentities(emptyList());
+  }
+
+  /**
+   * Get all identities of the installed services and components. Skip any service or component that is excluded.
+   */
+  public static UsedIdentities populate(Cluster cluster, ServiceExclude serviceExclude, ComponentExclude componentExclude, KerberosHelper kerberosHelper) throws AmbariException {
+    List<KerberosIdentityDescriptor> result = new ArrayList<>();
+    KerberosDescriptor root = kerberosHelper.getKerberosDescriptor(cluster);
+    result.addAll(nullToEmpty(root.getIdentities()));
+    for (Service service : cluster.getServices().values()) {
+      if (serviceExclude.shouldExclude(service.getName())) {
+        continue;
+      }
+      KerberosServiceDescriptor serviceDescriptor = root.getService(service.getName());
+      if (serviceDescriptor != null) {
+        result.addAll(nullToEmpty(serviceDescriptor.getIdentities()));
+        result.addAll(nullToEmpty(componentIdentities(serviceDescriptor, service, componentExclude)));
+      }
+    }
+    return new UsedIdentities(result);
+  }
+
+  private static List<KerberosIdentityDescriptor> componentIdentities(KerberosServiceDescriptor serviceDescriptor, Service service, ComponentExclude componentExclude) {
+    return service.getServiceComponents().values()
+      .stream()
+      .filter(component -> !isComponentExcluded(service, componentExclude, component))
+      .flatMap(component -> serviceDescriptor.getComponentIdentities(component.getName()).stream())
+      .collect(toList());
+  }
+
+  private static boolean isComponentExcluded(Service service, ComponentExclude componentExclude, ServiceComponent component) {
+    return component.getServiceComponentHosts().isEmpty()
+      || componentExclude.shouldExclude(service.getName(), component.getName(), component.getServiceComponentHosts().values());
+  }
+
+  private UsedIdentities(List<KerberosIdentityDescriptor> used) {
+    this.used = used;
+  }
+
+  /**
+   * @return true if there is an identity in the used list with the same keytab or principal name as the given identity
+   */
+  public boolean contains(KerberosIdentityDescriptor identity) {
+    return used.stream().anyMatch(each -> identity.isShared(each));
+  }
+
+  public interface ServiceExclude {
+    boolean shouldExclude(String serviceName);
+    ServiceExclude NONE = serviceName -> false; // default implementation, exclude nothing
+  }
+
+  public interface ComponentExclude {
+    boolean shouldExclude(String serviceName, String componentName, Collection<ServiceComponentHost> hosts);
+    ComponentExclude NONE = (serviceName, componentName, hosts) -> false; // default implementation, exclude nothing
+  }
+}

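For orientation, a rough usage sketch of the new class — the calling code, the excluded service name, and the candidateIdentities list are assumptions for illustration, not part of this commit; it simply mirrors the populate/contains contract documented above and the filtering already shown in the cleaner:

    // Build the set of identities still used by everything except the service being removed
    // (populate() throws AmbariException, so the enclosing method must declare or handle it).
    UsedIdentities used = UsedIdentities.populate(
        cluster,
        serviceName -> serviceName.equals("OOZIE"),      // hypothetical: skip the removed service
        UsedIdentities.ComponentExclude.NONE,
        kerberosHelper);

    // An identity is only safe to delete if no remaining service or component shares its
    // principal or keytab (requires: import static java.util.stream.Collectors.toList;).
    List<KerberosIdentityDescriptor> removable = candidateIdentities.stream()
        .filter(identity -> !used.contains(identity))
        .collect(toList());
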
http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java
index 5b55339..8acc401 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java
@@ -17,6 +17,8 @@
  */
 package org.apache.ambari.server.events;
 
+import org.apache.ambari.server.serveraction.kerberos.Component;
+
 /**
  * The {@link ServiceComponentUninstalledEvent} class is fired when a service
  * component is successfully uninstalled.
@@ -85,4 +87,8 @@ public class ServiceComponentUninstalledEvent extends ServiceEvent {
     buffer.append("}");
     return buffer.toString();
   }
+
+  public Component getComponent() {
+    return new Component(getHostName(), getServiceName(), getComponentName());
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java
index aca00a8..de96342 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java
@@ -17,23 +17,24 @@
  */
 package org.apache.ambari.server.events;
 
+import static java.util.stream.Collectors.toList;
+
+import java.util.List;
+
+import org.apache.ambari.server.serveraction.kerberos.Component;
+
 /**
  * The {@link ServiceRemovedEvent} class is fired when a service is successfully
  * removed.
  */
 public class ServiceRemovedEvent extends ServiceEvent {
-  /**
-   * Constructor.
-   *
-   * @param clusterId
-   * @param stackName
-   * @param stackVersion
-   * @param serviceName
-   */
+  private final List<Component> components;
+
   public ServiceRemovedEvent(long clusterId, String stackName,
-      String stackVersion, String serviceName) {
+                             String stackVersion, String serviceName, List<Component> components) {
     super(AmbariEventType.SERVICE_REMOVED_SUCCESS, clusterId, stackName,
-        stackVersion, serviceName);
+      stackVersion, serviceName);
+    this.components = components;
   }
 
   /**
@@ -49,4 +50,12 @@ public class ServiceRemovedEvent extends ServiceEvent {
     buffer.append("}");
     return buffer.toString();
   }
+
+  public List<Component> getComponents() {
+    return components;
+  }
+
+  public List<String> getComponentNames() {
+    return components.stream().map(Component::getServiceComponentName).collect(toList());
+  }
 }

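As a hedged sketch of the consuming side — the @Subscribe annotation, the handler name and the declared exception are inferred from the KerberosIdentityCleaner test further down and are otherwise assumptions:

    @Subscribe
    public void serviceRemoved(ServiceRemovedEvent event) throws KerberosMissingAdminCredentialsException {
      // The event now carries every (host, service, component) of the removed service,
      // so a listener can resolve and clean up the service's leftover Kerberos identities.
      List<Component> components = event.getComponents();
      List<String> componentNames = event.getComponentNames();
      // ... delete only the identities that are no longer shared with installed components
    }
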
http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
index a23b914..d0f8d0b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
@@ -217,6 +217,21 @@ public class ClusterDAO {
   }
 
   /**
+   * Gets the latest configurations for a given stack with any of the given config types.
+   * This method does not take into account whether the configuration is enabled.
+   */
+  @RequiresSession
+  public List<ClusterConfigEntity> getLatestConfigurationsWithTypes(long clusterId, StackId stackId, Collection<String> configTypes) {
+    StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
+    return daoUtils.selectList(
+      entityManagerProvider.get()
+      .createNamedQuery("ClusterConfigEntity.findLatestConfigsByStackWithTypes", ClusterConfigEntity.class)
+      .setParameter("clusterId", clusterId)
+      .setParameter("stack", stackEntity)
+      .setParameter("types", configTypes));
+  }
+
+  /**
    * Gets the latest configurations for a given stack for all of the
    * configurations of the specified cluster.
    *

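A brief call-site sketch for the new DAO method — the cluster handle and the config type are placeholders; ServiceConfigDAOTest.testGetLatestClusterConfigsWithTypes below exercises it the same way with "oozie-site":

    StackId stackId = cluster.getDesiredStackVersion();
    List<ClusterConfigEntity> latest = clusterDAO.getLatestConfigurationsWithTypes(
        cluster.getClusterId(), stackId, Arrays.asList("oozie-site"));
    // At most one entity per requested type, picked by MAX(selectedTimestamp),
    // whether or not that configuration is currently enabled.
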
http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
index 34f3034..3a74367 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
@@ -62,6 +62,9 @@ import org.apache.commons.lang.builder.EqualsBuilder;
         name = "ClusterConfigEntity.findLatestConfigsByStack",
         query = "SELECT clusterConfig FROM ClusterConfigEntity clusterConfig WHERE clusterConfig.clusterId = :clusterId AND clusterConfig.stack = :stack AND clusterConfig.selectedTimestamp = (SELECT MAX(clusterConfig2.selectedTimestamp) FROM ClusterConfigEntity clusterConfig2 WHERE clusterConfig2.clusterId=:clusterId AND clusterConfig2.stack=:stack AND clusterConfig2.type = clusterConfig.type)"),
     @NamedQuery(
+        name = "ClusterConfigEntity.findLatestConfigsByStackWithTypes",
+        query = "SELECT clusterConfig FROM ClusterConfigEntity clusterConfig WHERE clusterConfig.type IN :types AND clusterConfig.clusterId = :clusterId AND clusterConfig.stack = :stack AND clusterConfig.selectedTimestamp = (SELECT MAX(clusterConfig2.selectedTimestamp) FROM ClusterConfigEntity clusterConfig2 WHERE clusterConfig2.clusterId=:clusterId AND clusterConfig2.stack=:stack AND clusterConfig2.type = clusterConfig.type)"),
+    @NamedQuery(
         name = "ClusterConfigEntity.findNotMappedClusterConfigsToService",
         query = "SELECT clusterConfig FROM ClusterConfigEntity clusterConfig WHERE clusterConfig.serviceConfigEntities IS EMPTY AND clusterConfig.type != 'cluster-env'"),
     @NamedQuery(

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index b4f7120..9597ba1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -267,6 +267,13 @@ public interface Cluster {
   Config getConfig(String configType, String versionTag);
 
   /**
+   * Gets the latest configurations (including inactive ones) with any of the given types.
+   * This method does not take into account whether the configuration is enabled.
+   * @return the list of configurations with the given types
+   */
+  List<Config> getLatestConfigsWithTypes(Collection<String> types);
+
+  /**
    * Gets the specific config that matches the specified type and version.  This not
    * necessarily a DESIRED configuration that applies to a cluster.
    * @param configType  the config type to find

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index 5084703..74d79c8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -51,6 +51,7 @@ import org.apache.ambari.server.orm.entities.ServiceConfigEntity;
 import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntityPK;
 import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.serveraction.kerberos.Component;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -588,6 +589,7 @@ public class ServiceImpl implements Service {
   @Override
   @Transactional
   public void delete() throws AmbariException {
+    List<Component> components = getComponents(); // XXX temporal coupling, this must be called BEFORE deleteAllComponents()
     deleteAllComponents();
     deleteAllServiceConfigs();
 
@@ -601,11 +603,21 @@ public class ServiceImpl implements Service {
     }
 
     ServiceRemovedEvent event = new ServiceRemovedEvent(getClusterId(), stackId.getStackName(),
-        stackId.getStackVersion(), getName());
+        stackId.getStackVersion(), getName(), components);
 
     eventPublisher.publish(event);
   }
 
+  private List<Component> getComponents() {
+    List<Component> result = new ArrayList<>();
+    for (ServiceComponent component : getServiceComponents().values()) {
+      for (ServiceComponentHost host : component.getServiceComponentHosts().values()) {
+        result.add(new Component(host.getHostName(), getName(), component.getName()));
+      }
+    }
+    return result;
+  }
+
   @Transactional
   protected void removeEntities() throws AmbariException {
     serviceDesiredStateDAO.removeByPK(serviceDesiredStateEntityPK);

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 06b6217..c950d67 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -35,6 +35,7 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.locks.ReadWriteLock;
+import java.util.stream.Collectors;
 
 import javax.annotation.Nullable;
 import javax.persistence.EntityManager;
@@ -1125,6 +1126,14 @@ public class ClusterImpl implements Cluster {
   }
 
   @Override
+  public List<Config> getLatestConfigsWithTypes(Collection<String> types) {
+    return clusterDAO.getLatestConfigurationsWithTypes(clusterId, getDesiredStackVersion(), types)
+      .stream()
+      .map(clusterConfigEntity -> configFactory.createExisting(this, clusterConfigEntity))
+      .collect(Collectors.toList());
+  }
+
+  @Override
   public Config getConfigByVersion(String configType, Long configVersion) {
     clusterGlobalLock.readLock().lock();
     try {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
index 0a89c1d..5658133 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.state.kerberos;
 
+import static java.util.stream.Collectors.toList;
+
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -777,6 +779,16 @@ public abstract class AbstractKerberosDescriptorContainer extends AbstractKerber
     return map;
   }
 
+  /**
+   * @return identities which are not references to other identities
+   */
+  public List<KerberosIdentityDescriptor> getIdentitiesSkipReferences() {
+    return nullToEmpty(getIdentities())
+      .stream()
+      .filter(identity -> !identity.getReferencedServiceName().isPresent() && identity.getName() != null && !identity.getName().startsWith("/"))
+      .collect(toList());
+  }
+
   @Override
   public int hashCode() {
     return super.hashCode() +

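To make the promoted filter concrete — the descriptor variables below are assumptions — both service- and component-level descriptors now inherit the helper from this abstract container:

    // References such as { "name": "/HDFS/NAMENODE/hdfs" } and identities that point at a
    // referenced service are skipped; only concrete names like "oozie_server1" are returned.
    List<KerberosIdentityDescriptor> serviceOwned   = serviceDescriptor.getIdentitiesSkipReferences();
    List<KerberosIdentityDescriptor> componentOwned = componentDescriptor.getIdentitiesSkipReferences();
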
http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
index 41d1f65..768a17e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
@@ -17,9 +17,7 @@
  */
 package org.apache.ambari.server.state.kerberos;
 
-import java.util.ArrayList;
 import java.util.Collection;
-import java.util.List;
 import java.util.Map;
 
 /**
@@ -113,19 +111,6 @@ public class KerberosComponentDescriptor extends AbstractKerberosDescriptorConta
     return null;
   }
 
-  /**
-   * @return identities which are not references to other identities
-   */
-  public List<KerberosIdentityDescriptor> getIdentitiesSkipReferences() {
-    List<KerberosIdentityDescriptor> result = new ArrayList<>();
-    for (KerberosIdentityDescriptor each : nullToEmpty(getIdentities())) {
-      if (!each.getReferencedServiceName().isPresent() && each.getName() != null && !each.getName().startsWith("/")) {
-        result.add(each);
-      }
-    }
-    return result;
-  }
-
   @Override
   public int hashCode() {
     return 35 * super.hashCode();

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
index 2023793..911723b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
@@ -17,10 +17,8 @@
  */
 package org.apache.ambari.server.state.kerberos;
 
-import java.util.List;
 import java.util.Map;
 
-import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.collections.Predicate;
 import org.apache.ambari.server.collections.PredicateUtils;
 
@@ -371,16 +369,12 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
     }
   }
 
+
   /**
-   * @return true if this identity either has the same principal or keytab as any of the given identities.
+   * @return true if the given identity has the same principal or keytab as this identity
    */
-  public boolean isShared(List<KerberosIdentityDescriptor> identities) throws AmbariException {
-    for (KerberosIdentityDescriptor each : identities) {
-      if (hasSamePrincipal(each) || hasSameKeytab(each)) {
-        return true;
-      }
-    }
-    return false;
+  public boolean isShared(KerberosIdentityDescriptor that) {
+    return hasSamePrincipal(that) || hasSameKeytab(that);
   }
 
   private boolean hasSameKeytab(KerberosIdentityDescriptor that) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
index d22c92e..027f339 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
@@ -18,15 +18,20 @@
 package org.apache.ambari.server.controller.utilities;
 
 import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Sets.newHashSet;
+import static java.util.Collections.singletonList;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.reset;
 
+import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.events.ServiceComponentUninstalledEvent;
+import org.apache.ambari.server.events.ServiceRemovedEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.serveraction.kerberos.Component;
 import org.apache.ambari.server.serveraction.kerberos.KerberosMissingAdminCredentialsException;
@@ -35,6 +40,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 import org.easymock.EasyMockRule;
@@ -47,6 +53,7 @@ import org.junit.Test;
 public class KerberosIdentityCleanerTest extends EasyMockSupport {
   @Rule public EasyMockRule mocks = new EasyMockRule(this);
   private static final String HOST = "c6401";
+  private static final String HOST2 = "c6402";
   private static final String OOZIE = "OOZIE";
   private static final String OOZIE_SERVER = "OOZIE_SERVER";
   private static final String OOZIE_2 = "OOZIE2";
@@ -55,6 +62,9 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
   private static final String RESOURCE_MANAGER_2 = "RESOURCE_MANAGER2";
   private static final String YARN = "YARN";
   private static final String RESOURCE_MANAGER = "RESOURCE_MANAGER";
+  private static final String HDFS = "HDFS";
+  private static final String NAMENODE = "NAMENODE";
+  private static final String DATANODE = "DATANODE";
   private static final long CLUSTER_ID = 1;
   @Mock private KerberosHelper kerberosHelper;
   @Mock private Clusters clusters;
@@ -66,8 +76,8 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
 
   @Test
   public void removesAllKerberosIdentitesOfComponentAfterComponentWasUninstalled() throws Exception {
-    installComponent(OOZIE, OOZIE_SERVER);
-    kerberosHelper.deleteIdentity(cluster, new Component(HOST, OOZIE, OOZIE_SERVER), newArrayList("oozie_server1", "oozie_server2"));
+    installComponent(OOZIE, OOZIE_SERVER, HOST);
+    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("oozie_server1", "oozie_server2"));
     expectLastCall().once();
     replayAll();
     uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
@@ -83,9 +93,9 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
 
   @Test
   public void skipsRemovingIdentityThatIsSharedByPrincipalName() throws Exception {
-    installComponent(OOZIE, OOZIE_SERVER);
-    installComponent(OOZIE_2, OOZIE_SERVER_2);
-    kerberosHelper.deleteIdentity(cluster, new Component(HOST, OOZIE, OOZIE_SERVER), newArrayList("oozie_server1"));
+    installComponent(OOZIE, OOZIE_SERVER, HOST);
+    installComponent(OOZIE_2, OOZIE_SERVER_2, HOST);
+    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("oozie_server1"));
     expectLastCall().once();
     replayAll();
     uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
@@ -94,9 +104,9 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
 
   @Test
   public void skipsRemovingIdentityThatIsSharedByKeyTabFilePath() throws Exception {
-    installComponent(YARN, RESOURCE_MANAGER);
-    installComponent(YARN_2, RESOURCE_MANAGER_2);
-    kerberosHelper.deleteIdentity(cluster, new Component(HOST, YARN, RESOURCE_MANAGER), newArrayList("rm_unique"));
+    installComponent(YARN, RESOURCE_MANAGER, HOST);
+    installComponent(YARN_2, RESOURCE_MANAGER_2, HOST);
+    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, YARN, RESOURCE_MANAGER)), newHashSet("rm_unique"));
     expectLastCall().once();
     replayAll();
     uninstallComponent(YARN, RESOURCE_MANAGER, HOST);
@@ -112,11 +122,43 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
     verifyAll();
   }
 
-  private void installComponent(String serviceName, final String componentName) {
+  @Test
+  public void skipsRemovingIdentityIfComponentIsStillInstalledOnADifferentHost() throws Exception {
+    installComponent(OOZIE, OOZIE_SERVER, HOST, HOST2);
+    replayAll();
+    uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
+    verifyAll();
+  }
+
+  @Test
+  public void removesServiceIdentitiesSkipComponentIdentitiesAfterServiceWasUninstalled() throws Exception {
+    installComponent(OOZIE, OOZIE_SERVER, HOST);
+    kerberosHelper.deleteIdentities(cluster, hdfsComponents(), newHashSet("hdfs-service"));
+    expectLastCall().once();
+    replayAll();
+    uninstallService(HDFS, hdfsComponents());
+    verifyAll();
+  }
+
+  private ArrayList<Component> hdfsComponents() {
+    return newArrayList(new Component(HOST, HDFS, NAMENODE), new Component(HOST, HDFS, DATANODE));
+  }
+
+  private void installComponent(String serviceName, String componentName, String... hostNames) {
     Service service = createMock(serviceName + "_" + componentName, Service.class);
+    ServiceComponent component = createMock(componentName, ServiceComponent.class);
+    expect(component.getName()).andReturn(componentName).anyTimes();
+    Map<String, ServiceComponentHost> hosts = new HashMap<>();
+    expect(component.getServiceComponentHosts()).andReturn(hosts).anyTimes();
+    for (String hostName : hostNames) {
+      ServiceComponentHost host = createMock(hostName, ServiceComponentHost.class);
+      expect(host.getHostName()).andReturn(hostName).anyTimes();
+      hosts.put(hostName, host);
+    }
     installedServices.put(serviceName, service);
+    expect(service.getName()).andReturn(serviceName).anyTimes();
     expect(service.getServiceComponents()).andReturn(new HashMap<String, ServiceComponent>() {{
-      put(componentName, null);
+      put(componentName, component);
     }}).anyTimes();
   }
 
@@ -124,6 +166,10 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
     kerberosIdentityCleaner.componentRemoved(new ServiceComponentUninstalledEvent(CLUSTER_ID, "any", "any", service, component, host, false));
   }
 
+  private void uninstallService(String service, List<Component> components) throws KerberosMissingAdminCredentialsException {
+    kerberosIdentityCleaner.serviceRemoved(new ServiceRemovedEvent(CLUSTER_ID, "any", "any", service, components));
+  }
+
   @Before
   public void setUp() throws Exception {
     kerberosIdentityCleaner = new KerberosIdentityCleaner(new AmbariEventPublisher(), kerberosHelper, clusters);
@@ -139,7 +185,8 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
       "              'name': '/HDFS/NAMENODE/hdfs'" +
       "            }," +
       "            {" +
-      "              'name': 'oozie_server1'" +
+      "              'name': 'oozie_server1'," +
+      "              'principal': { 'value': 'oozie1/_HOST@EXAMPLE.COM' }" +
       "            }," +"" +
       "            {" +
       "              'name': 'oozie_server2'," +
@@ -193,6 +240,39 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
       "          ]" +
       "        }" +
       "      ]" +
+      "    }," +
+      "    {" +
+      "      'name': 'HDFS'," +
+      "      'identities': [" +
+      "            {" +
+      "              'name': 'hdfs-service'" +
+      "            }," +
+      "            {" +
+      "              'name': 'shared'," +
+      "              'principal': { 'value': 'oozie/_HOST@EXAMPLE.COM' }" +
+      "            }," +
+      "            {" +
+      "              'name': '/YARN/RESOURCE_MANAGER/rm'" +
+      "            }," +
+      "          ]," +
+      "      'components': [" +
+      "        {" +
+      "          'name': 'NAMENODE'," +
+      "          'identities': [" +
+      "            {" +
+      "              'name': 'namenode'" +
+      "            }" +
+      "          ]" +
+      "        }," +
+      "        {" +
+      "          'name': 'DATANODE'," +
+      "          'identities': [" +
+      "            {" +
+      "              'name': 'datanode'" +
+      "            }" +
+      "          ]" +
+      "        }" +
+      "      ]" +
       "    }" +
       "  ]" +
       "}");

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
index 406349a..80cb4dc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
@@ -17,6 +17,8 @@
  */
 package org.apache.ambari.server.orm.dao;
 
+import static java.util.Arrays.asList;
+
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -482,6 +484,16 @@ public class ServiceConfigDAOTest {
     Assert.assertTrue(entity.isSelected());
   }
 
+  @Test
+  public void testGetLatestClusterConfigsWithTypes() throws Exception {
+    initClusterEntities();
+    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
+    List<ClusterConfigEntity> entities = clusterDAO.getLatestConfigurationsWithTypes(clusterEntity.getClusterId(), HDP_01, asList("oozie-site"));
+    Assert.assertEquals(1, entities.size());
+    entities = clusterDAO.getLatestConfigurationsWithTypes(clusterEntity.getClusterId(), HDP_01, asList("no-such-type"));
+    Assert.assertTrue(entities.isEmpty());
+  }
+
   /**
    * Tests getting latest and enabled configurations when there is a
    * configuration group. Configurations for configuration groups are not


[29/36] ambari git commit: AMBARI-21435 Add python functions to get the Ambari version of the agent (mgergely)

Posted by lp...@apache.org.
AMBARI-21435 Add python functions to get the Ambari version of the agent (mgergely)

Change-Id: I9481b32babac92ad5d7496fe4abb208eefaac922


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/383b8c7d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/383b8c7d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/383b8c7d

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 383b8c7d83545bc4cb21058794e91b5a4aece425
Parents: e767aa4
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Wed Jul 12 13:22:33 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Wed Jul 12 13:22:33 2017 +0200

----------------------------------------------------------------------
 .../python/ambari_commons/get_ambari_version.py | 44 ++++++++++++++++++++
 1 file changed, 44 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/383b8c7d/ambari-common/src/main/python/ambari_commons/get_ambari_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/ambari_commons/get_ambari_version.py b/ambari-common/src/main/python/ambari_commons/get_ambari_version.py
new file mode 100644
index 0000000..c8c5336
--- /dev/null
+++ b/ambari-common/src/main/python/ambari_commons/get_ambari_version.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import ConfigParser
+from resource_management.core.logger import Logger
+
+"""
+returns the ambari version on an agent host
+"""
+def get_ambari_version_agent():
+  ambari_version = None
+  AMBARI_AGENT_CONF = '/etc/ambari-agent/conf/ambari-agent.ini'
+  if os.path.exists(AMBARI_AGENT_CONF):
+    try:
+      ambari_agent_config = ConfigParser.RawConfigParser()
+      ambari_agent_config.read(AMBARI_AGENT_CONF)
+      data_dir = ambari_agent_config.get('agent', 'prefix')
+      ver_file = os.path.join(data_dir, 'version')
+      with open(ver_file, "r") as f:
+        ambari_version = f.read().strip()
+    except Exception, e:
+      Logger.info('Unable to determine ambari version from the agent version file.')
+      Logger.debug('Exception: %s' % str(e))
+      pass
+    pass
+  return ambari_version


[04/36] ambari git commit: AMBARI-21385. Selected hosts are not dropped even if some of the selected hosts were deleted (alexantonenko)

Posted by lp...@apache.org.
AMBARI-21385. Selected hosts are not dropped even if some of the selected hosts were deleted (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ac5eaae5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ac5eaae5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ac5eaae5

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: ac5eaae59673740a43b56b54978948e73cd71a54
Parents: f17d317
Author: Alex Antonenko <hi...@gmail.com>
Authored: Fri Jun 30 17:52:42 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Fri Jul 7 10:38:45 2017 +0300

----------------------------------------------------------------------
 .../main/host/bulk_operations_controller.js           | 11 +++++++++--
 ambari-web/app/controllers/main/host/details.js       |  2 +-
 ambari-web/app/mappers/hosts_mapper.js                |  2 +-
 ambari-web/app/utils/db.js                            | 14 ++++++++++----
 ambari-web/app/views/main/host.js                     |  8 +++-----
 ambari-web/test/views/main/host_test.js               |  4 ++--
 6 files changed, 26 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ac5eaae5/ambari-web/app/controllers/main/host/bulk_operations_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/host/bulk_operations_controller.js b/ambari-web/app/controllers/main/host/bulk_operations_controller.js
index b053fc3..b921cd5 100644
--- a/ambari-web/app/controllers/main/host/bulk_operations_controller.js
+++ b/ambari-web/app/controllers/main/host/bulk_operations_controller.js
@@ -450,13 +450,20 @@ App.BulkOperationsController = Em.Controller.extend({
         }
       }),
 
-      onPrimary: function () {
+      completeDelete() {
+        if (arg1 !== 'error') {
+          App.db.unselectHosts(arg2.hosts);
+        }
         location.reload();
+      },
+
+      onPrimary: function () {
+        this.completeDelete();
         this._super();
       },
 
       onClose: function () {
-        location.reload();
+        this.completeDelete();
         this._super();
       }
     });

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac5eaae5/ambari-web/app/controllers/main/host/details.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/host/details.js b/ambari-web/app/controllers/main/host/details.js
index 6f34dfe..382b09d 100644
--- a/ambari-web/app/controllers/main/host/details.js
+++ b/ambari-web/app/controllers/main/host/details.js
@@ -2678,7 +2678,7 @@ App.MainHostDetailsController = Em.Controller.extend(App.SupportClientConfigsDow
         var popup = this;
         var completeCallback = function () {
           var remainingHosts = App.db.getSelectedHosts('mainHostController').removeObject(self.get('content.hostName'));
-          App.db.setSelectedHosts('mainHostController', remainingHosts);
+          App.db.setSelectedHosts(remainingHosts);
           popup.hide();
         };
         self.doDeleteHost(completeCallback);

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac5eaae5/ambari-web/app/mappers/hosts_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/hosts_mapper.js b/ambari-web/app/mappers/hosts_mapper.js
index 203cd67..e536269 100644
--- a/ambari-web/app/mappers/hosts_mapper.js
+++ b/ambari-web/app/mappers/hosts_mapper.js
@@ -103,7 +103,7 @@ App.hostsMapper = App.QuickDataMapper.create({
       var cacheServices = App.cache['services'];
       var currentServiceComponentsMap = App.get('componentConfigMapper').buildServiceComponentMap(cacheServices);
       var newHostComponentsMap = {};
-      var selectedHosts = App.db.getSelectedHosts('mainHostController');
+      var selectedHosts = App.db.getSelectedHosts();
       var clusterName = App.get('clusterName');
       var advancedHostComponents = [];
       var hostComponentLogs = [];

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac5eaae5/ambari-web/app/utils/db.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/db.js b/ambari-web/app/utils/db.js
index e41b9c0..eddd7ef 100644
--- a/ambari-web/app/utils/db.js
+++ b/ambari-web/app/utils/db.js
@@ -221,10 +221,16 @@ App.db.setSortingStatuses = function (name, sortingConditions) {
   App.db.set('app.tables.sortingConditions', name, sortingConditions);
 };
 
-App.db.setSelectedHosts = function (name, selectedHosts) {
-  App.db.set('app.tables.selectedItems', name, selectedHosts);
+App.db.setSelectedHosts = function (selectedHosts) {
+  App.db.set('app.tables.selectedItems', 'mainHostController', selectedHosts);
 };
 
+App.db.unselectHosts = function (hostsToUnselect = []) {
+  let selectedHosts = App.db.getSelectedHosts();
+  selectedHosts = selectedHosts.filter(host => hostsToUnselect.indexOf(host) === -1);
+  App.db.setSelectedHosts(selectedHosts);
+}
+
 App.db.setHosts = function (hostInfo) {
   App.db.set('Installer', 'hostInfo', hostInfo);
 };
@@ -405,8 +411,8 @@ App.db.getSortingStatuses = function (name) {
   return name ? App.db.get('app.tables.sortingConditions', name): null;
 };
 
-App.db.getSelectedHosts = function (name) {
-  return App.db.get('app.tables.selectedItems', name) || [];
+App.db.getSelectedHosts = function () {
+  return App.db.get('app.tables.selectedItems', 'mainHostController') || [];
 };
 
 /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac5eaae5/ambari-web/app/views/main/host.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host.js b/ambari-web/app/views/main/host.js
index 6a8dcf2..78eb82b 100644
--- a/ambari-web/app/views/main/host.js
+++ b/ambari-web/app/views/main/host.js
@@ -246,8 +246,7 @@ App.MainHostView = App.TableView.extend(App.TableServerViewMixin, {
     }
     this.combineSelectedFilter();
     //10 is an index of selected column
-    var controllerName = this.get('controller.name');
-    App.db.setSelectedHosts(controllerName, this.get('selectedHosts'));
+    App.db.setSelectedHosts(this.get('selectedHosts'));
 
     this.addObserver('selectAllHosts', this, this.toggleAllHosts);
   },
@@ -255,8 +254,7 @@ App.MainHostView = App.TableView.extend(App.TableServerViewMixin, {
    * combine selected hosts on page with selected hosts which are filtered out but added to cluster
    */
   combineSelectedFilter: function () {
-    var controllerName = this.get('controller.name');
-    var previouslySelectedHosts = App.db.getSelectedHosts(controllerName);
+    var previouslySelectedHosts = App.db.getSelectedHosts();
     var selectedHosts = [];
     var hostsOnPage = this.get('pageContent').mapProperty('hostName');
     selectedHosts = this.get('pageContent').filterProperty('selected').mapProperty('hostName');
@@ -306,7 +304,7 @@ App.MainHostView = App.TableView.extend(App.TableServerViewMixin, {
   clearSelection: function() {
     this.get('pageContent').setEach('selected', false);
     this.set('selectAllHosts', false);
-    App.db.setSelectedHosts(this.get('controller.name'), []);
+    App.db.setSelectedHosts([]);
     this.get('selectedHosts').clear();
     this.filterSelected();
   },

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac5eaae5/ambari-web/test/views/main/host_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/host_test.js b/ambari-web/test/views/main/host_test.js
index e0eb9bc..15bdab2 100644
--- a/ambari-web/test/views/main/host_test.js
+++ b/ambari-web/test/views/main/host_test.js
@@ -523,7 +523,7 @@ describe('App.MainHostView', function () {
     it("App.db.setSelectedHosts should be called", function() {
       view.set('selectedHosts', []);
       view.updateCheckedFlags();
-      expect(App.db.setSelectedHosts.calledWith('ctrl1', [])).to.be.true;
+      expect(App.db.setSelectedHosts.calledWith([])).to.be.true;
     });
 
     it("addObserver should be called", function() {
@@ -620,7 +620,7 @@ describe('App.MainHostView', function () {
     });
 
     it("App.db.setSelectedHosts should be called", function() {
-      expect(App.db.setSelectedHosts.calledWith('ctrl1', [])).to.be.true;
+      expect(App.db.setSelectedHosts.calledWith([])).to.be.true;
     });
 
     it("filterSelected should be called", function() {


[03/36] ambari git commit: AMBARI-21388. Can't delete host components via bulk-menu (alexantonenko)

Posted by lp...@apache.org.
AMBARI-21388. Can't delete host components via bulk-menu (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7029e7fa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7029e7fa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7029e7fa

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 7029e7fa0c3e2ebd8e6eea7eef1f965234018586
Parents: ac5eaae
Author: Alex Antonenko <hi...@gmail.com>
Authored: Mon Jul 3 15:35:28 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Fri Jul 7 10:38:45 2017 +0300

----------------------------------------------------------------------
 .../main/host/bulk_operations_controller.js          | 15 +++++++--------
 ambari-web/app/messages.js                           |  4 ++--
 .../main/host/delete_hosts_result_popup.hbs          |  8 ++++++--
 3 files changed, 15 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7029e7fa/ambari-web/app/controllers/main/host/bulk_operations_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/host/bulk_operations_controller.js b/ambari-web/app/controllers/main/host/bulk_operations_controller.js
index b921cd5..34d61d8 100644
--- a/ambari-web/app/controllers/main/host/bulk_operations_controller.js
+++ b/ambari-web/app/controllers/main/host/bulk_operations_controller.js
@@ -717,6 +717,7 @@ App.BulkOperationsController = Em.Controller.extend({
 
       didInsertElement: function() {
         this.set('expanded', hostsToDelete.length <= minShown);
+        this._super();
       },
 
       onPrimary: function() {
@@ -763,6 +764,8 @@ App.BulkOperationsController = Em.Controller.extend({
         name: 'host.host_component.delete_components',
         sender: self,
         data: {
+          hostNames,
+          componentName: operationData.componentName,
           data: JSON.stringify({
             RequestInfo: {
               query: 'HostRoles/host_name.in(' + hostNames.join(',') + ')&HostRoles/component_name.in(' + operationData.componentName + ')'
@@ -783,7 +786,7 @@ App.BulkOperationsController = Em.Controller.extend({
     var undeletableHosts = [];
     if (arg1 == "error") {
       var request = arg0;
-      var params = arg4;
+      let params = arg4;
       var response = JSON.parse(request.responseText);
       var host = Ember.Object.create({
         error: {
@@ -797,7 +800,7 @@ App.BulkOperationsController = Em.Controller.extend({
       undeletableHosts.push(host);
     } else {
       var data = arg0;
-      var params = arg2;
+      let params = arg2;
       if (data) {
         data.deleteResult.forEach(function (host) {
           if (host.deleted) {
@@ -812,12 +815,7 @@ App.BulkOperationsController = Em.Controller.extend({
           }
         });
       } else {
-        var host = {
-          deleted: {
-            key: params.hosts[0]
-          }
-        };
-        deletedHosts.push(host);
+        deletedHosts.pushObjects(params.hostNames.map(hostName => ({deleted: {key: `${hostName}/${params.componentName}`}})));
       }
     }
 
@@ -831,6 +829,7 @@ App.BulkOperationsController = Em.Controller.extend({
         message: Em.I18n.t('hosts.bulkOperation.delete.component.dryRun.message').format(undeletableHosts.length),
         undeletableHosts: undeletableHosts,
         deletedHosts: deletedHosts.sortProperty('deleted.key'),
+        deleteComponents: true,
         onToggleHost: function (host) {
           host.contexts[0].toggleProperty('isCollapsed');
         }

http://git-wip-us.apache.org/repos/asf/ambari/blob/7029e7fa/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 5e1d08f..f75d801 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -2597,8 +2597,8 @@ Em.I18n.translations = {
   'hosts.bulkOperation.confirmation.delete.component.minimum.body': 'At least {0} {1} should be installed in the cluster.',
   'hosts.bulkOperation.confirmation.delete.component.nothingToDo.body': '{0} are neither installed on selected hosts nor in the states that can be deleted.',
   'hosts.bulkOperation.confirmation.delete.component.skip':'The following hosts are skipped as {0} on them are not in the states that can be deleted.',
-  'hosts.bulkOperation.delete.component.result.header':'Delete Hosts',
-  'hosts.bulkOperation.delete.component.result.body': 'The following hosts were deleted successfully:',
+  'hosts.bulkOperation.delete.component.result.header':'Delete Components',
+  'hosts.bulkOperation.delete.component.result.body': 'The following components were deleted successfully:',
   'hosts.bulkOperation.delete.component.dryRun.message':'There are <strong>{0} host(s)</strong> that cannot be deleted (expand for reason):',
 
   'hosts.selectHostsDialog.title': 'Select Configuration Group Hosts',

http://git-wip-us.apache.org/repos/asf/ambari/blob/7029e7fa/ambari-web/app/templates/main/host/delete_hosts_result_popup.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/host/delete_hosts_result_popup.hbs b/ambari-web/app/templates/main/host/delete_hosts_result_popup.hbs
index eb6f89c..2e074bc 100644
--- a/ambari-web/app/templates/main/host/delete_hosts_result_popup.hbs
+++ b/ambari-web/app/templates/main/host/delete_hosts_result_popup.hbs
@@ -16,7 +16,11 @@
 * limitations under the License.
 }}
 {{#if view.deletedHosts}}
-  <p>{{t hosts.bulkOperation.deleteHosts.result.body}}</p>
+  {{#if view.deleteComponents}}
+    <p>{{t hosts.bulkOperation.delete.component.result.body}}</p>
+  {{else}}
+    <p>{{t hosts.bulkOperation.deleteHosts.result.body}}</p>
+  {{/if}}
 {{/if}}
 
 {{#each deletedHost in view.deletedHosts}}
@@ -39,4 +43,4 @@
       </div>
     </div>
   {{/each}}
-{{/if}}
\ No newline at end of file
+{{/if}}


[16/36] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by lp...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
index 535b9d9..62562f8 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
@@ -1,4 +1,14 @@
 {
-  "stack_selector": ["distro-select", "/usr/bin/distro-select", "distro-select"],
-  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}
\ No newline at end of file
+  "PERF": {
+    "stack_selector": [
+      "distro-select",
+      "/usr/bin/distro-select",
+      "distro-select"
+    ],
+    "conf_selector": [
+      "conf-select",
+      "/usr/bin/conf-select",
+      "conf-select"
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index ca579ea..bade238 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -46,9 +46,11 @@ import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.controller.StackConfigurationResponse;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.topology.AdvisedConfiguration;
@@ -66,6 +68,7 @@ import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyException;
 import org.apache.ambari.server.topology.TopologyRequest;
 import org.apache.commons.lang.StringUtils;
+import org.easymock.EasyMock;
 import org.easymock.EasyMockRule;
 import org.easymock.EasyMockSupport;
 import org.easymock.Mock;
@@ -95,6 +98,10 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
   private static final Configuration EMPTY_CONFIG = new Configuration(Collections.<String, Map<String, String>>emptyMap(), Collections.<String, Map<String, Map<String, String>>>emptyMap());
   private final Map<String, Collection<String>> serviceComponents = new HashMap<>();
+  private final Map<String, Map<String, String>> stackProperties = new HashMap<>();
+
+  private final String STACK_NAME = "testStack";
+  private final String STACK_VERSION = "1";
 
   @Rule
   public EasyMockRule mocks = new EasyMockRule(this);
@@ -129,13 +136,16 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
   @Mock
   private TopologyRequest topologyRequestMock;
 
+  @Mock(type = MockType.NICE)
+  private ConfigHelper configHelper;
+
   @Before
   public void init() throws Exception {
     expect(bp.getStack()).andReturn(stack).anyTimes();
     expect(bp.getName()).andReturn("test-bp").anyTimes();
 
-    expect(stack.getName()).andReturn("testStack").anyTimes();
-    expect(stack.getVersion()).andReturn("1").anyTimes();
+    expect(stack.getName()).andReturn(STACK_NAME).atLeastOnce();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).atLeastOnce();
     // return false for all components since for this test we don't care about the value
     expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes();
     expect(stack.getConfigurationPropertiesWithMetadata(anyObject(String.class), anyObject(String.class))).andReturn(Collections.<String, Stack.ConfigProperty>emptyMap()).anyTimes();
@@ -225,6 +235,11 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
     Set<String> emptySet = Collections.emptySet();
     expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
+
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
     expect(ambariContext.isClusterKerberosEnabled(1)).andReturn(true).once();
     expect(ambariContext.getClusterName(1L)).andReturn("clusterName").anyTimes();
     PowerMock.mockStatic(AmbariServer.class);
@@ -234,14 +249,14 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(controller.getKerberosHelper()).andReturn(kerberosHelper).anyTimes();
     expect(controller.getClusters()).andReturn(clusters).anyTimes();
     expect(kerberosHelper.getKerberosDescriptor(cluster)).andReturn(kerberosDescriptor).anyTimes();
-    Set<String> properties = new HashSet<String>();
+    Set<String> properties = new HashSet<>();
     properties.add("core-site/hadoop.security.auth_to_local");
     expect(kerberosDescriptor.getAllAuthToLocalProperties()).andReturn(properties).anyTimes();
   }
 
   @After
   public void tearDown() {
-    reset(bp, serviceInfo, stack, ambariContext);
+    reset(bp, serviceInfo, stack, ambariContext, configHelper);
   }
 
   @Test
@@ -6322,13 +6337,16 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
     topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
     BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
     reset(stack);
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
     expect(stack.getConfiguration(bp.getServices())).andReturn(createStackDefaults()).anyTimes();
 
     Set<String> emptySet = Collections.emptySet();
     expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
-
     replay(stack);
+
     // WHEN
     Set<String> configTypeUpdated = configProcessor.doUpdateForClusterCreate();
     // THEN
@@ -6379,13 +6397,17 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
     topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
     BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
     reset(stack);
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
     expect(stack.getConfiguration(bp.getServices())).andReturn(createStackDefaults()).anyTimes();
 
     Set<String> emptySet = Collections.emptySet();
     expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
 
     replay(stack);
+
     // WHEN
     configProcessor.doUpdateForClusterCreate();
     // THEN
@@ -8050,6 +8072,10 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
   @Test
   public void testValuesTrimming() throws Exception {
     reset(stack);
+
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
+
     Map<String, Map<String, String>> properties = new HashMap<>();
 
     Map<String, String> hdfsSite = new HashMap<>();
@@ -8073,6 +8099,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
       new StackConfigurationResponse(null, null, null, null, "hdfs-site", null, Collections.singleton(PropertyInfo.PropertyType.PASSWORD), null, null, null)));
     propertyConfigs.put("test.host", new Stack.ConfigProperty(
       new StackConfigurationResponse(null, null, null, null, "hdfs-site", null, null, null, valueAttributesInfoHost, null)));
+
     expect(stack.getServiceForConfigType("hdfs-site")).andReturn("HDFS").anyTimes();
     expect(stack.getConfigurationPropertiesWithMetadata("HDFS", "hdfs-site")).andReturn(propertyConfigs).anyTimes();
 
@@ -8144,7 +8171,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     throws InvalidTopologyException {
 
 
-    replay(stack, serviceInfo, ambariContext, controller, kerberosHelper, kerberosDescriptor, clusters, cluster);
+    replay(stack, serviceInfo, ambariContext, configHelper, controller, kerberosHelper, kerberosDescriptor, clusters, cluster);
 
     Map<String, HostGroupInfo> hostGroupInfo = new HashMap<>();
     Collection<String> allServices = new HashSet<>();
@@ -8207,7 +8234,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
       this.name = name;
       this.components = components;
       this.hosts = hosts;
-      this.configuration = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
+      configuration = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
         Collections.<String, Map<String, Map<String, String>>>emptyMap());
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
index 32a5358..39aee82 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
@@ -1414,8 +1414,8 @@ public class ClusterStackVersionResourceProviderTest {
     expect(cluster.getClusterId()).andReturn(1L).anyTimes();
     expect(cluster.getHosts()).andReturn(hostsForCluster.values()).atLeastOnce();
     expect(cluster.getServices()).andReturn(serviceMap).anyTimes();
-    expect(cluster.getServiceComponentHosts(anyObject(String.class))).andReturn(
-        serviceComponentHosts).anyTimes();
+    expect(cluster.getServiceComponentHosts(anyObject(String.class))).andReturn(serviceComponentHosts).anyTimes();
+    expect(cluster.getCurrentStackVersion()).andReturn(stackId).atLeastOnce();
 
     expect(repositoryVersionDAOMock.findByStackAndVersion(anyObject(StackId.class),
         anyObject(String.class))).andReturn(repoVersionEntity);

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
index 8b08dc4..5535256 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
@@ -50,6 +50,8 @@ import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.serveraction.kerberos.KerberosInvalidConfigurationException;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.StackId;
 import org.easymock.Capture;
 import org.easymock.CaptureType;
 import org.easymock.EasyMock;
@@ -103,6 +105,13 @@ public class ClusterConfigurationRequestTest {
   @Mock(type = MockType.NICE)
   private KerberosHelper kerberosHelper;
 
+  @Mock(type = MockType.NICE)
+  private ConfigHelper configHelper;
+
+  private final String STACK_NAME = "testStack";
+  private final String STACK_VERSION = "1";
+  private final Map<String, Map<String, String>> stackProperties = new HashMap<>();
+
   /**
    * testConfigType config type should be in updatedConfigTypes, as no custom property in Blueprint
    * ==> Kerberos config property should be updated
@@ -221,6 +230,8 @@ public class ClusterConfigurationRequestTest {
     expect(clusters.getCluster("testCluster")).andReturn(cluster).anyTimes();
 
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
     expect(stack.getServiceForConfigType("testConfigType")).andReturn("KERBEROS").anyTimes();
     expect(stack.getAllConfigurationTypes(anyString())).andReturn(Collections.singletonList("testConfigType")
     ).anyTimes();
@@ -246,6 +257,7 @@ public class ClusterConfigurationRequestTest {
     expect(blueprint.getComponents("KERBEROS")).andReturn(kerberosComponents).anyTimes();
     expect(blueprint.getComponents("ZOOKEPER")).andReturn(zookeeperComponents).anyTimes();
 
+    expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(blueprint.isValidConfigType("testConfigType")).andReturn(true).anyTimes();
@@ -256,10 +268,14 @@ public class ClusterConfigurationRequestTest {
     expect(topology.getHostGroupsForComponent(anyString())).andReturn(Collections.<String>emptyList())
       .anyTimes();
 
-      expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+    expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
     expect(ambariContext.createConfigurationRequests(EasyMock.<Map<String, Object>>anyObject())).andReturn(Collections
       .<ConfigurationRequest>emptyList()).anyTimes();
 
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
     if (kerberosConfig == null) {
       kerberosConfig = new HashMap<>();
       Map<String, String> properties = new HashMap<>();
@@ -277,15 +293,14 @@ public class ClusterConfigurationRequestTest {
       (captureUpdatedConfigTypes));
     expectLastCall();
 
-    PowerMock.replay(stack, blueprint, topology, controller, clusters, kerberosHelper, ambariContext,
-      AmbariContext
-        .class);
+    PowerMock.replay(stack, blueprint, topology, controller, clusters, kerberosHelper,
+        ambariContext, AmbariContext.class, configHelper);
 
     ClusterConfigurationRequest clusterConfigurationRequest = new ClusterConfigurationRequest(
       ambariContext, topology, false, stackAdvisorBlueprintProcessor, true);
     clusterConfigurationRequest.process();
 
-    verify(blueprint, topology, ambariContext, controller, kerberosHelper);
+    verify(blueprint, topology, ambariContext, controller, kerberosHelper, configHelper);
 
 
     String clusterName = captureClusterName.getValue();
@@ -308,8 +323,9 @@ public class ClusterConfigurationRequestTest {
     expect(clusters.getCluster("testCluster")).andReturn(cluster).anyTimes();
 
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
-    expect(stack.getAllConfigurationTypes(anyString())).andReturn(Collections.singletonList("testConfigType")
-    ).anyTimes();
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
+    expect(stack.getAllConfigurationTypes(anyString())).andReturn(Collections.<String>singletonList("testConfigType")).anyTimes();
     expect(stack.getExcludedConfigurationTypes(anyString())).andReturn(Collections.<String>emptySet()).anyTimes();
     expect(stack.getConfigurationPropertiesWithMetadata(anyString(), anyString())).andReturn(Collections.<String,
       Stack.ConfigProperty>emptyMap()).anyTimes();
@@ -331,25 +347,29 @@ public class ClusterConfigurationRequestTest {
     expect(blueprint.getComponents("KERBEROS")).andReturn(kerberosComponents).anyTimes();
     expect(blueprint.getComponents("ZOOKEPER")).andReturn(zookeeperComponents).anyTimes();
 
+    expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getConfiguration()).andReturn(stackConfig).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(Collections.<String, HostGroupInfo>emptyMap()).anyTimes();
     expect(topology.getClusterId()).andReturn(Long.valueOf(1)).anyTimes();
+
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
     expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
     expect(ambariContext.createConfigurationRequests(EasyMock.<Map<String, Object>>anyObject())).andReturn(Collections
       .<ConfigurationRequest>emptyList()).anyTimes();
 
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
 
     PowerMock.replay(stack, blueprint, topology, controller, clusters, ambariContext,
-      AmbariContext
-        .class);
+        AmbariContext.class, configHelper);
 
     ClusterConfigurationRequest clusterConfigurationRequest = new ClusterConfigurationRequest(
       ambariContext, topology, false, stackAdvisorBlueprintProcessor);
     clusterConfigurationRequest.process();
 
-    verify(blueprint, topology, ambariContext, controller);
+    verify(blueprint, topology, ambariContext, controller, configHelper);
 
   }
 
@@ -365,6 +385,7 @@ public class ClusterConfigurationRequestTest {
     hg1.setConfiguration(createConfigurationsForHostGroup());
     hostGroupInfoMap.put("hg1", hg1);
 
+    expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfiguration()).andReturn(configuration).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap);
@@ -377,7 +398,12 @@ public class ClusterConfigurationRequestTest {
     expect(blueprint.isValidConfigType("cluster-env")).andReturn(true).anyTimes();
     expect(blueprint.isValidConfigType("global")).andReturn(true).anyTimes();
 
-    EasyMock.replay(stack, blueprint, topology);
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
+    EasyMock.replay(stack, blueprint, topology, ambariContext, configHelper);
     // WHEN
     new ClusterConfigurationRequest(ambariContext, topology, false, stackAdvisorBlueprintProcessor);
     // THEN
@@ -388,7 +414,7 @@ public class ClusterConfigurationRequestTest {
 
     assertFalse("SPARK service not present in topology host group config thus 'spark-env' config type should be removed from config.", hg1.getConfiguration().getFullAttributes().containsKey("spark-env"));
     assertTrue("HDFS service is present in topology host group config thus 'hdfs-site' config type should be left in the config.", hg1.getConfiguration().getFullAttributes().containsKey("hdfs-site"));
-    verify(stack, blueprint, topology);
+    verify(stack, blueprint, topology, ambariContext, configHelper);
   }
 
   @Test
@@ -409,6 +435,7 @@ public class ClusterConfigurationRequestTest {
     hg1.setConfiguration(createConfigurationsForHostGroup());
     hostGroupInfoMap.put("hg1", hg1);
 
+    expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfiguration()).andReturn(configuration).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap);
@@ -419,7 +446,12 @@ public class ClusterConfigurationRequestTest {
     expect(blueprint.isValidConfigType("cluster-env")).andReturn(true).anyTimes();
     expect(blueprint.isValidConfigType("global")).andReturn(true).anyTimes();
 
-    EasyMock.replay(stack, blueprint, topology);
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
+    EasyMock.replay(stack, blueprint, topology, ambariContext, configHelper);
 
     // When
 
@@ -431,7 +463,7 @@ public class ClusterConfigurationRequestTest {
 
     assertFalse("SPARK service not present in topology host group config thus 'spark-env' config type should be removed from config.", hg1.getConfiguration().getFullAttributes().containsKey("spark-env"));
     assertTrue("HDFS service is present in topology host group config thus 'hdfs-site' config type should be left in the config.", hg1.getConfiguration().getFullAttributes().containsKey("hdfs-site"));
-    verify(stack, blueprint, topology);
+    verify(stack, blueprint, topology, ambariContext, configHelper);
 
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/common-services/configs/hawq_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/common-services/configs/hawq_default.json b/ambari-server/src/test/python/common-services/configs/hawq_default.json
index 79864a9..1b6fafb 100644
--- a/ambari-server/src/test/python/common-services/configs/hawq_default.json
+++ b/ambari-server/src/test/python/common-services/configs/hawq_default.json
@@ -73,7 +73,11 @@
         "cluster-env": {
             "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
-            "user_group": "hadoop"
+            "user_group": "hadoop",
+            "stack_name": "PHD",
+            "stack_root": "{\"PHD\": \"/usr/phd\"}",
+            "stack_tools": "{\n \"PHD\": { \"stack_selector\": [\"phd-select\", \"/usr/bin/phd-select\", \"phd-select\"],\n  \"conf_selector\": [\"conf-select\", \"/usr/bin/conf-select\", \"conf-select\"]\n}\n}",
+            "stack_features": "{\"PHD\":{\"stack_features\":[{\"name\":\"express_upgrade\",\"description\":\"Express upgrade support\",\"min_version\":\"3.0.0.0\"},{\"name\":\"rolling_upgrade\",\"description\":\"Rolling upgrade support\",\"min_version\":\"3.0.0.0\"},{\"name\":\"config_versioning\",\"description\":\"Configurable versions support\",\"min_version\":\"3.0.0.0\"}]\n}\n}"
         }
     },
     "clusterHostInfo": {

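(Not part of the patch.) As the hawq_default.json hunk above illustrates, cluster-env now carries stack_root, stack_tools, and stack_features as JSON strings keyed by the stack name, which is what lets several stacks' tooling be described side by side. A minimal sketch of how such values might be resolved for the current stack — the helper name and the inline sample data below are illustrative assumptions, not code from this commit:

import json

# Hypothetical helper: given cluster-env values shaped like the ones in the
# hunk above, pick the root and selector that belong to the named stack.
def resolve_stack_tools(cluster_env):
    stack_name = cluster_env["stack_name"]
    stack_root = json.loads(cluster_env["stack_root"])[stack_name]
    stack_tools = json.loads(cluster_env["stack_tools"])[stack_name]
    selector_name, selector_path, selector_package = stack_tools["stack_selector"]
    return stack_root, selector_path

cluster_env = {
    "stack_name": "PHD",
    "stack_root": '{"PHD": "/usr/phd"}',
    "stack_tools": '{"PHD": {"stack_selector": ["phd-select", "/usr/bin/phd-select", "phd-select"], '
                   '"conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]}}',
}
print(resolve_stack_tools(cluster_env))  # ('/usr/phd', '/usr/bin/phd-select')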
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py b/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
index 0d47061..e6cce98 100644
--- a/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
+++ b/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
@@ -41,7 +41,11 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 21673930752L, used = 5695861760L,
       free = 15978068992L, path="/")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    configurations = {'{{cluster-env/stack_name}}': 'HDP',
+      '{{cluster-env/stack_root}}': '{"HDP":"/usr/hdp"}'}
+
+    res = alert_disk_space.execute(configurations=configurations)
+
     self.assertEqual(res,
       ('OK', ['Capacity Used: [26.28%, 5.7 GB], Capacity Total: [21.7 GB], path=/']))
 
@@ -50,7 +54,7 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 21673930752L, used = 14521533603L,
       free = 7152397149L, path="/")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations = configurations)
     self.assertEqual(res, (
       'WARNING',
       ['Capacity Used: [67.00%, 14.5 GB], Capacity Total: [21.7 GB], path=/']))
@@ -60,7 +64,7 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 21673930752L, used = 20590234214L,
       free = 1083696538, path="/")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations = configurations)
     self.assertEqual(res, ('CRITICAL',
     ['Capacity Used: [95.00%, 20.6 GB], Capacity Total: [21.7 GB], path=/']))
 
@@ -69,7 +73,7 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 5418482688L, used = 1625544806L,
       free = 3792937882L, path="/")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations = configurations)
     self.assertEqual(res, ('WARNING', [
       'Capacity Used: [30.00%, 1.6 GB], Capacity Total: [5.4 GB], path=/. Total free space is less than 5.0 GB']))
 
@@ -81,7 +85,7 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 21673930752L, used = 5695861760L,
       free = 15978068992L, path="/usr/hdp")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations = configurations)
     self.assertEqual(res,
       ('OK', ['Capacity Used: [26.28%, 5.7 GB], Capacity Total: [21.7 GB], path=/usr/hdp']))
 
@@ -90,6 +94,6 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 5418482688L, used = 1625544806L,
       free = 3792937882L, path="/usr/hdp")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations = configurations)
     self.assertEqual(res, (
       'WARNING', ["Capacity Used: [30.00%, 1.6 GB], Capacity Total: [5.4 GB], path=/usr/hdp. Total free space is less than 5.0 GB"]))


[12/36] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by lp...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
index 4e7d857..bcadd03 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
@@ -1,873 +1,873 @@
 {
     "localComponents": [
-        "SECONDARY_NAMENODE", 
-        "HDFS_CLIENT", 
-        "DATANODE", 
-        "NAMENODE", 
-        "RANGER_ADMIN", 
-        "RANGER_TAGSYNC", 
-        "RANGER_USERSYNC", 
-        "ZOOKEEPER_SERVER", 
-        "ZOOKEEPER_CLIENT", 
-        "KERBEROS_CLIENT", 
+        "SECONDARY_NAMENODE",
+        "HDFS_CLIENT",
+        "DATANODE",
+        "NAMENODE",
+        "RANGER_ADMIN",
+        "RANGER_TAGSYNC",
+        "RANGER_USERSYNC",
+        "ZOOKEEPER_SERVER",
+        "ZOOKEEPER_CLIENT",
+        "KERBEROS_CLIENT",
         "RANGER_KMS_SERVER"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-kms-site": {}, 
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "kms-log4j": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "ranger-ugsync-site": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
-        "ranger-kms-security": {}, 
-        "kerberos-env": {}, 
-        "kms-properties": {}, 
-        "admin-properties": {}, 
-        "ranger-kms-policymgr-ssl": {}, 
+        "ranger-kms-site": {},
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "kms-log4j": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "ranger-ugsync-site": {},
+        "ranger-hdfs-plugin-properties": {},
+        "ranger-kms-security": {},
+        "kerberos-env": {},
+        "kms-properties": {},
+        "admin-properties": {},
+        "ranger-kms-policymgr-ssl": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
-        "ranger-tagsync-site": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-kms-audit": {}, 
-        "hadoop-policy": {}, 
-        "hdfs-log4j": {}, 
-        "usersync-log4j": {}, 
-        "krb5-conf": {}, 
-        "kms-site": {}, 
+        },
+        "ranger-tagsync-site": {},
+        "tagsync-log4j": {},
+        "ranger-kms-audit": {},
+        "hadoop-policy": {},
+        "hdfs-log4j": {},
+        "usersync-log4j": {},
+        "krb5-conf": {},
+        "kms-site": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "zookeeper-env": {}, 
-        "admin-log4j": {}, 
-        "zoo.cfg": {}, 
-        "ranger-hdfs-security": {}, 
-        "usersync-properties": {}, 
-        "kms-env": {}, 
-        "dbks-site": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "zookeeper-env": {},
+        "admin-log4j": {},
+        "zoo.cfg": {},
+        "ranger-hdfs-security": {},
+        "usersync-properties": {},
+        "kms-env": {},
+        "dbks-site": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "43-0", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER_KMS", 
-    "role": "RANGER_KMS_SERVER", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 43, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "43-0",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER_KMS",
+    "role": "RANGER_KMS_SERVER",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 43,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "c1", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 200, 
-    "roleParams": {}, 
+    },
+    "clusterName": "c1",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 200,
+    "roleParams": {},
     "configurationTags": {
         "ranger-kms-site": {
             "tag": "version1467026737262"
-        }, 
+        },
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1467016680635"
-        }, 
+        },
         "kms-log4j": {
             "tag": "version1467026737262"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1467016680511"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1467016680537"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ranger-kms-security": {
             "tag": "version1467026737262"
-        }, 
+        },
         "kerberos-env": {
             "tag": "version1467016537243"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-kms-policymgr-ssl": {
             "tag": "version1467026737262"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1467016680401"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1467016680586"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "ranger-kms-audit": {
             "tag": "version1467026737262"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "krb5-conf": {
             "tag": "version1467016537243"
-        }, 
+        },
         "kms-site": {
             "tag": "version1467026751210"
-        }, 
+        },
         "core-site": {
             "tag": "version1467026751256"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1467016680446"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1467016680492"
-        }, 
+        },
         "kms-properties": {
             "tag": "version1467026737262"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "kms-env": {
             "tag": "version1467026737262"
-        }, 
+        },
         "dbks-site": {
             "tag": "version1467026751234"
-        }, 
+        },
         "cluster-env": {
             "tag": "version1467016680567"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
-        "package_version": "2_5_0_0_*", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
+        "package_version": "2_5_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
-        "current_version": "2.5.0.0-801", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
-        "agent_stack_retry_count": "5", 
-        "stack_version": "2.5", 
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
-        "java_home": "/usr/jdk64/jdk1.7.0_45", 
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
-        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
-        "package_list": "[{\"name\":\"ranger_${stack_version}-kms\",\"condition\":\"\",\"skipUpgrade\":false}]", 
-        "db_name": "ambari", 
-        "group_list": "[\"kms\",\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"kms\",\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
+        "current_version": "2.5.0.0-801",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+        "agent_stack_retry_count": "5",
+        "stack_version": "2.5",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+        "package_list": "[{\"name\":\"ranger_${stack_version}-kms\",\"condition\":\"\",\"skipUpgrade\":false}]",
+        "db_name": "ambari",
+        "group_list": "[\"kms\",\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"kms\",\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER_KMS/0.5.0.2.3/package", 
-        "script": "scripts/kms_server.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "version": "2.5.0.0-801", 
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "service_package_folder": "common-services/RANGER_KMS/0.5.0.2.3/package",
+        "script": "scripts/kms_server.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "version": "2.5.0.0-801",
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 0, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 0,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_kms_server_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-kms-site": {
-            "ranger.service.https.port": "9393", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "xa.webapp.dir": "./webapp", 
-            "ranger.service.host": "{{kms_host}}", 
-            "ranger.service.shutdown.port": "7085", 
-            "ranger.contextName": "/kms", 
+            "ranger.service.https.port": "9393",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "xa.webapp.dir": "./webapp",
+            "ranger.service.host": "{{kms_host}}",
+            "ranger.service.shutdown.port": "7085",
+            "ranger.contextName": "/kms",
             "ranger.service.http.port": "{{kms_port}}"
-        }, 
+        },
         "ranger-hdfs-audit": {
             "xasecure.audit.destination.solr.zookeepers": "NONE",
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
             "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
-            "xasecure.audit.destination.hdfs": "true", 
-            "xasecure.audit.destination.solr": "false", 
-            "xasecure.audit.provider.summary.enabled": "false", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.destination.hdfs": "true",
+            "xasecure.audit.destination.solr": "false",
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
-            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}", 
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
+            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
             "ranger.plugins.hdfs.serviceuser": "hdfs",
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
             "ranger.service.https.attrib.clientAuth": "want",
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
             "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
-            "ranger.audit.solr.zookeepers": "NONE", 
-            "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM", 
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
+            "ranger.audit.solr.zookeepers": "NONE",
+            "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
             "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
-            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "kms-log4j": {
             "content": "\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License. See accompanying LICENSE file.\n#\n\n# If the Java System property 'kms.log.dir' is not defined at KMS start up time\n# Setup sets its value to '${kms.home}/logs'\n\nlog4j.appender.kms=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kms.DatePattern='.'yyyy-MM-dd\nlog4j.appender.kms.File=${kms.log.dir}/kms.log\nlog4j.appender.kms.Append=true\nlog4j.appender.kms.layout=org.apache.log4j.PatternLayout\
 nlog4j.appender.kms.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} - %m%n\n\nlog4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kms-audit.DatePattern='.'yyyy-MM-dd\nlog4j.appender.kms-audit.File=${kms.log.dir}/kms-audit.log\nlog4j.appender.kms-audit.Append=true\nlog4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout\nlog4j.appender.kms-audit.layout.ConversionPattern=%d{ISO8601} %m%n\n\nlog4j.logger.kms-audit=INFO, kms-audit\nlog4j.additivity.kms-audit=false\n\nlog4j.rootLogger=ALL, kms\nlog4j.logger.org.apache.hadoop.conf=ERROR\nlog4j.logger.org.apache.hadoop=INFO\nlog4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
-            "atlas.kafka.security.protocol": "SASL_PLAINTEXT", 
-            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}", 
-            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}", 
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule", 
-            "atlas.jaas.KafkaClient.option.serviceName": "kafka", 
-            "atlas.kafka.bootstrap.servers": "localhost:6667", 
-            "atlas.jaas.KafkaClient.option.useKeyTab": "true", 
-            "atlas.jaas.KafkaClient.option.storeKey": "true", 
-            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required", 
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+            "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
+            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
+            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "atlas.jaas.KafkaClient.option.serviceName": "kafka",
+            "atlas.kafka.bootstrap.servers": "localhost:6667",
+            "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+            "atlas.jaas.KafkaClient.option.storeKey": "true",
+            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
             "atlas.kafka.sasl.kerberos.service.name": "kafka"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "false", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "false", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "false",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "false",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "ranger-kms-security": {
-            "ranger.plugin.kms.policy.pollIntervalMs": "30000", 
-            "ranger.plugin.kms.service.name": "{{repo_name}}", 
-            "ranger.plugin.kms.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
-            "ranger.plugin.kms.policy.rest.ssl.config.file": "/etc/ranger/kms/conf/ranger-policymgr-ssl.xml", 
-            "ranger.plugin.kms.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
+            "ranger.plugin.kms.policy.pollIntervalMs": "30000",
+            "ranger.plugin.kms.service.name": "{{repo_name}}",
+            "ranger.plugin.kms.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+            "ranger.plugin.kms.policy.rest.ssl.config.file": "/etc/ranger/kms/conf/ranger-policymgr-ssl.xml",
+            "ranger.plugin.kms.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
             "ranger.plugin.kms.policy.rest.url": "{{policymgr_mgr_url}}"
-        }, 
+        },
         "kerberos-env": {
-            "kdc_hosts": "c6401.ambari.apache.org", 
-            "manage_auth_to_local": "true", 
-            "install_packages": "true", 
-            "realm": "EXAMPLE.COM", 
-            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
-            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}", 
-            "kdc_create_attributes": "", 
-            "admin_server_host": "c6401.ambari.apache.org", 
-            "group": "ambari-managed-principals", 
-            "password_length": "20", 
-            "ldap_url": "", 
-            "manage_identities": "true", 
-            "password_min_lowercase_letters": "1", 
-            "create_ambari_principal": "true", 
-            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}", 
-            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", 
-            "password_chat_timeout": "5", 
-            "kdc_type": "mit-kdc", 
-            "set_password_expiry": "false", 
-            "password_min_punctuation": "1", 
-            "container_dn": "", 
-            "case_insensitive_username_rules": "false", 
-            "password_min_whitespace": "0", 
-            "password_min_uppercase_letters": "1", 
+            "kdc_hosts": "c6401.ambari.apache.org",
+            "manage_auth_to_local": "true",
+            "install_packages": "true",
+            "realm": "EXAMPLE.COM",
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}",
+            "kdc_create_attributes": "",
+            "admin_server_host": "c6401.ambari.apache.org",
+            "group": "ambari-managed-principals",
+            "password_length": "20",
+            "ldap_url": "",
+            "manage_identities": "true",
+            "password_min_lowercase_letters": "1",
+            "create_ambari_principal": "true",
+            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+            "password_chat_timeout": "5",
+            "kdc_type": "mit-kdc",
+            "set_password_expiry": "false",
+            "password_min_punctuation": "1",
+            "container_dn": "",
+            "case_insensitive_username_rules": "false",
+            "password_min_whitespace": "0",
+            "password_min_uppercase_letters": "1",
             "password_min_digits": "1"
-        }, 
+        },
         "kms-properties": {
-            "REPOSITORY_CONFIG_USERNAME": "keyadmin", 
-            "db_user": "rangerkms01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangerkms01", 
-            "KMS_MASTER_KEY_PASSWD": "StrongPassword01", 
-            "db_root_user": "root", 
-            "db_name": "rangerkms01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
-            "SQL_CONNECTOR_JAR": "{{driver_curl_target}}", 
+            "REPOSITORY_CONFIG_USERNAME": "keyadmin",
+            "db_user": "rangerkms01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangerkms01",
+            "KMS_MASTER_KEY_PASSWD": "StrongPassword01",
+            "db_root_user": "root",
+            "db_name": "rangerkms01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
+            "SQL_CONNECTOR_JAR": "{{driver_curl_target}}",
             "REPOSITORY_CONFIG_PASSWORD": "keyadmin"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
-        }, 
+        },
         "ranger-kms-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:1019", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.datanode.http.address": "0.0.0.0:1022", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+            "dfs.heartbeat.interval": "3",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:1019",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.datanode.http.address": "0.0.0.0:1022",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
-            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
-            "ranger.tagsync.source.atlasrest.username": "", 
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM", 
-            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.source.atlasrest.username": "",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
+            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "tagsync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
-        }, 
+        },
         "ranger-kms-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/ranger/kms/audit/solr/spool", 
-            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/ranger/kms/audit/hdfs/spool", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "xasecure.audit.destination.solr": "true", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/ranger/kms/audit/solr/spool",
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/ranger/kms/audit/hdfs/spool",
+            "xasecure.audit.destination.hdfs": "true",
+            "xasecure.audit.destination.solr": "true",
             "xasecure.audit.provider.summary.enabled": "false",
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "krb5-conf": {
-            "domains": "", 
-            "manage_krb5_conf": "true", 
-            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}", 
+            "domains": "",
+            "manage_krb5_conf": "true",
+            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}",
             "conf_dir": "/etc"
-        }, 
+        },
         "kms-site": {
-            "hadoop.kms.proxyuser.ranger.hosts": "*", 
-            "hadoop.kms.authentication.type": "kerberos", 
-            "hadoop.kms.proxyuser.ranger.groups": "*", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.path": "/hadoop-kms/hadoop-auth-signature-secret", 
-            "hadoop.kms.security.authorization.manager": "org.apache.ranger.authorization.kms.authorizer.RangerKmsAuthorizer", 
-            "hadoop.kms.authentication.kerberos.name.rules": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "hadoop.kms.current.key.cache.timeout.ms": "30000", 
-            "hadoop.kms.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "hadoop.kms.audit.aggregation.window.ms": "10000", 
-            "hadoop.kms.proxyuser.ranger.users": "*", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type": "kerberos", 
-            "hadoop.kms.key.provider.uri": "dbks://http@localhost:9292/kms", 
-            "hadoop.security.keystore.JavaKeyStoreProvider.password": "none", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "hadoop.kms.authentication.signer.secret.provider": "random", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string": "#HOSTNAME#:#PORT#,...", 
-            "hadoop.kms.cache.enable": "true", 
-            "hadoop.kms.cache.timeout.ms": "600000", 
+            "hadoop.kms.proxyuser.ranger.hosts": "*",
+            "hadoop.kms.authentication.type": "kerberos",
+            "hadoop.kms.proxyuser.ranger.groups": "*",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.path": "/hadoop-kms/hadoop-auth-signature-secret",
+            "hadoop.kms.security.authorization.manager": "org.apache.ranger.authorization.kms.authorizer.RangerKmsAuthorizer",
+            "hadoop.kms.authentication.kerberos.name.rules": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "hadoop.kms.current.key.cache.timeout.ms": "30000",
+            "hadoop.kms.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "hadoop.kms.audit.aggregation.window.ms": "10000",
+            "hadoop.kms.proxyuser.ranger.users": "*",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type": "kerberos",
+            "hadoop.kms.key.provider.uri": "dbks://http@localhost:9292/kms",
+            "hadoop.security.keystore.JavaKeyStoreProvider.password": "none",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "hadoop.kms.authentication.signer.secret.provider": "random",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string": "#HOSTNAME#:#PORT#,...",
+            "hadoop.kms.cache.enable": "true",
+            "hadoop.kms.cache.timeout.ms": "600000",
             "hadoop.kms.authentication.kerberos.principal": "*"
-        }, 
+        },
         "core-site": {
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "fs.trash.interval": "360", 
-            "ipc.server.tcpnodelay": "true", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
-            "ipc.client.idlethreshold": "8000", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "hadoop.security.authentication": "kerberos", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.proxyuser.kms.groups": "*", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.HTTP.groups": "users", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "hadoop.security.authorization": "true", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org", 
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "fs.trash.interval": "360",
+            "ipc.server.tcpnodelay": "true",
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "ipc.client.idlethreshold": "8000",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "hadoop.security.authentication": "kerberos",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.proxyuser.kms.groups": "*",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.HTTP.groups": "users",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "hadoop.security.authorization": "true",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
             "ipc.client.connection.maxidletime": "30000"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
             "hdfs_principal_name": "hdfs-cl1@EXAMPLE.COM",
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/ha

<TRUNCATED>

[08/36] ambari git commit: AMBARI-21400. Upgrade Infra Solr version from 5.5.2 to 6.6.x (oleewere)

Posted by lp...@apache.org.
AMBARI-21400. Upgrade Infra Solr version from 5.5.2 to 6.6.x (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a795f38c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a795f38c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a795f38c

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: a795f38cd49c2c55c837b0daecaa63a67015d0eb
Parents: 1939dab
Author: oleewere <ol...@gmail.com>
Authored: Thu Jul 6 20:28:11 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Fri Jul 7 14:25:24 2017 +0200

----------------------------------------------------------------------
 .../libraries/functions/solr_cloud_util.py      |  10 +
 .../src/main/resources/solr                     | 826 +++++++++++++++----
 .../ambari/infra/solr/AmbariSolrCloudCLI.java   |  14 +
 .../infra/solr/AmbariSolrCloudClient.java       |   8 +
 .../commands/RemoveAdminHandlersCommand.java    |  46 ++
 .../commands/SetClusterPropertyZkCommand.java   |   6 +-
 .../InfraRuleBasedAuthorizationPluginTest.java  |   5 +
 ambari-infra/pom.xml                            |   2 +-
 .../configsets/audit_logs/conf/solrconfig.xml   |   3 +-
 .../configsets/hadoop_logs/conf/solrconfig.xml  |   3 +-
 .../main/configsets/history/conf/solrconfig.xml |   3 +-
 .../logsearch/dao/SolrSchemaFieldDao.java       |   2 +-
 ambari-logsearch/docker/Dockerfile              |   2 +-
 ambari-logsearch/docker/bin/start.sh            |   4 +-
 ambari-logsearch/pom.xml                        |   2 +-
 .../server/upgrade/UpgradeCatalog300.java       |  18 +
 .../0.1.0/package/scripts/params.py             |   3 +
 .../0.1.0/package/scripts/setup_infra_solr.py   |  17 +-
 .../properties/audit_logs-solrconfig.xml.j2     |   3 +-
 .../properties/service_logs-solrconfig.xml.j2   |   3 +-
 .../server/upgrade/UpgradeCatalog300Test.java   |  33 +
 .../stacks/2.4/AMBARI_INFRA/test_infra_solr.py  |   3 +
 22 files changed, 836 insertions(+), 180 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-common/src/main/python/resource_management/libraries/functions/solr_cloud_util.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/solr_cloud_util.py b/ambari-common/src/main/python/resource_management/libraries/functions/solr_cloud_util.py
index 1c5432b..12356ed 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/solr_cloud_util.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/solr_cloud_util.py
@@ -190,6 +190,16 @@ def secure_solr_znode(zookeeper_quorum, solr_znode, jaas_file, java64_home, sasl
   secure_solr_znode_cmd = format('{solr_cli_prefix} --secure-solr-znode --jaas-file {jaas_file} --sasl-users {sasl_users_str}')
   Execute(secure_solr_znode_cmd)
 
+def remove_admin_handlers(zookeeper_quorum, solr_znode, java64_home, collection, jaas_file, retry = 5, interval = 10):
+  """
+  Remove "solr.admin.AdminHandlers" request handler from collection config. Required for migrating to Solr 6 from Solr 5.
+  """
+  solr_cli_prefix = __create_solr_cloud_cli_prefix(zookeeper_quorum, solr_znode, java64_home)
+  remove_admin_handlers_cmd = format('{solr_cli_prefix} --remove-admin-handlers --collection {collection} --retry {retry} --interval {interval}')
+  if jaas_file is not None:
+    remove_admin_handlers_cmd+=format(' --jaas-file {jaas_file}')
+  Execute(remove_admin_handlers_cmd)
+
 def default_config(config, name, default_value):
   subdicts = filter(None, name.split('/'))
   if not config:
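
For reference, a minimal sketch of how the new remove_admin_handlers helper introduced above could be invoked from an Ambari service script during a Solr 5 to Solr 6 migration. This is illustrative only: the ZooKeeper quorum, znode, JDK path and collection names below are placeholder assumptions, not values taken from this patch, and a real stack script would read them from its params module.

from resource_management.libraries.functions import solr_cloud_util

# Hypothetical inputs; in a real stack script these come from params.py.
zookeeper_quorum = 'c6401.ambari.apache.org:2181'
solr_znode = '/infra-solr'
java64_home = '/usr/jdk64/jdk1.8.0_112'

# Drop the legacy "solr.admin.AdminHandlers" request handler from each
# collection's configuration before the collections are reloaded on Solr 6.
for collection in ('hadoop_logs', 'audit_logs', 'history'):
    solr_cloud_util.remove_admin_handlers(
        zookeeper_quorum,
        solr_znode,
        java64_home,
        collection,
        jaas_file=None,  # pass a JAAS file path when Kerberos is enabled
        retry=5,
        interval=10)

Under the hood the helper simply builds the AmbariSolrCloudCLI command line with the new --remove-admin-handlers option and runs it via Execute, so it must run on a host where the Ambari agent's resource_management environment is available.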

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/ambari-infra-assembly/src/main/resources/solr
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-assembly/src/main/resources/solr b/ambari-infra/ambari-infra-assembly/src/main/resources/solr
old mode 100644
new mode 100755
index bf504d9..6f2de8f
--- a/ambari-infra/ambari-infra-assembly/src/main/resources/solr
+++ b/ambari-infra/ambari-infra-assembly/src/main/resources/solr
@@ -49,6 +49,9 @@ SOLR_SCRIPT="$0"
 verbose=false
 THIS_OS=`uname -s`
 
+# What version of Java is required to run this version of Solr.
+JAVA_VER_REQ="1.8"
+
 stop_all=false
 
 # for now, we don't support running this script from cygwin due to problems
@@ -84,6 +87,7 @@ if [ -z "$SOLR_INCLUDE" ]; then
                /etc/default/solr.in.sh \
                /opt/solr/solr.in.sh; do
     if [ -r "$include" ]; then
+        SOLR_INCLUDE="$include"
         . "$include"
         break
     fi
@@ -116,16 +120,44 @@ else
   JAVA=java
 fi
 
-# test that Java exists and is executable on this server
-"$JAVA" -version >/dev/null 2>&1 || {
+if [ -z "$SOLR_STOP_WAIT" ]; then
+  SOLR_STOP_WAIT=180
+fi
+# test that Java exists, is executable and correct version
+JAVA_VER=$("$JAVA" -version 2>&1)
+if [[ $? -ne 0 ]] ; then
   echo >&2 "Java not found, or an error was encountered when running java."
-  echo >&2 "A working Java 7 or later is required to run Solr!"
-  echo >&2 "Please install Java or fix JAVA_HOME before running this script."
-  echo >&2 "Command that we tried: '${JAVA} -version'"
+  echo >&2 "A working Java $JAVA_VER_REQ JRE is required to run Solr!"
+  echo >&2 "Please install latest version of Java $JAVA_VER_REQ or set JAVA_HOME properly."
+  echo >&2 "Command that we tried: '${JAVA} -version', with response:"
+  echo >&2 "${JAVA_VER}"
+  echo >&2
+  echo >&2 "Debug information:"
+  echo >&2 "JAVA_HOME: ${JAVA_HOME:-N/A}"
   echo >&2 "Active Path:"
   echo >&2 "${PATH}"
   exit 1
-}
+else
+  JAVA_VER_NUM=$(echo $JAVA_VER | head -1 | awk -F '"' '/version/ {print $2}')
+  if [[ "$JAVA_VER_NUM" < "$JAVA_VER_REQ" ]] ; then
+    echo >&2 "Your current version of Java is too old to run this version of Solr"
+    echo >&2 "We found version $JAVA_VER_NUM, using command '${JAVA} -version', with response:"
+    echo >&2 "${JAVA_VER}"
+    echo >&2
+    echo >&2 "Please install latest version of Java $JAVA_VER_REQ or set JAVA_HOME properly."
+    echo >&2
+    echo >&2 "Debug information:"
+    echo >&2 "JAVA_HOME: ${JAVA_HOME:-N/A}"
+    echo >&2 "Active Path:"
+    echo >&2 "${PATH}"
+    exit 1
+  fi
+  JAVA_VENDOR="Oracle"
+  if [ "`echo $JAVA_VER | grep -i "IBM J9"`" != "" ]; then
+      JAVA_VENDOR="IBM J9"
+  fi
+fi
+
 
 # Select HTTP OR HTTPS related configurations
 SOLR_URL_SCHEME=http
@@ -134,30 +166,109 @@ SOLR_SSL_OPTS=""
 if [ -n "$SOLR_SSL_KEY_STORE" ]; then
   SOLR_JETTY_CONFIG+=("--module=https")
   SOLR_URL_SCHEME=https
-  SOLR_SSL_OPTS=" -Dsolr.jetty.keystore=$SOLR_SSL_KEY_STORE \
-    -Dsolr.jetty.keystore.password=$SOLR_SSL_KEY_STORE_PASSWORD \
-    -Dsolr.jetty.truststore=$SOLR_SSL_TRUST_STORE \
-    -Dsolr.jetty.truststore.password=$SOLR_SSL_TRUST_STORE_PASSWORD \
-    -Dsolr.jetty.ssl.needClientAuth=$SOLR_SSL_NEED_CLIENT_AUTH \
-    -Dsolr.jetty.ssl.wantClientAuth=$SOLR_SSL_WANT_CLIENT_AUTH"
+  SOLR_SSL_OPTS+=" -Dsolr.jetty.keystore=$SOLR_SSL_KEY_STORE"
+  if [ -n "$SOLR_SSL_KEY_STORE_PASSWORD" ]; then
+    SOLR_SSL_OPTS+=" -Dsolr.jetty.keystore.password=$SOLR_SSL_KEY_STORE_PASSWORD"
+  fi
+  if [ -n "$SOLR_SSL_KEY_STORE_TYPE" ]; then
+    SOLR_SSL_OPTS+=" -Dsolr.jetty.keystore.type=$SOLR_SSL_KEY_STORE_TYPE"
+  fi
+
+  if [ -n "$SOLR_SSL_TRUST_STORE" ]; then
+    SOLR_SSL_OPTS+=" -Dsolr.jetty.truststore=$SOLR_SSL_TRUST_STORE"
+  fi
+  if [ -n "$SOLR_SSL_TRUST_STORE_PASSWORD" ]; then
+    SOLR_SSL_OPTS+=" -Dsolr.jetty.truststore.password=$SOLR_SSL_TRUST_STORE_PASSWORD"
+  fi
+  if [ -n "$SOLR_SSL_TRUST_STORE_TYPE" ]; then
+    SOLR_SSL_OPTS+=" -Dsolr.jetty.truststore.type=$SOLR_SSL_TRUST_STORE_TYPE"
+  fi
+
+  if [ -n "$SOLR_SSL_NEED_CLIENT_AUTH" ]; then
+    SOLR_SSL_OPTS+=" -Dsolr.jetty.ssl.needClientAuth=$SOLR_SSL_NEED_CLIENT_AUTH"
+  fi
+  if [ -n "$SOLR_SSL_WANT_CLIENT_AUTH" ]; then
+    SOLR_SSL_OPTS+=" -Dsolr.jetty.ssl.wantClientAuth=$SOLR_SSL_WANT_CLIENT_AUTH"
+  fi
+
   if [ -n "$SOLR_SSL_CLIENT_KEY_STORE" ]; then
-    SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStore=$SOLR_SSL_CLIENT_KEY_STORE \
-      -Djavax.net.ssl.keyStorePassword=$SOLR_SSL_CLIENT_KEY_STORE_PASSWORD \
-      -Djavax.net.ssl.trustStore=$SOLR_SSL_CLIENT_TRUST_STORE \
-      -Djavax.net.ssl.trustStorePassword=$SOLR_SSL_CLIENT_TRUST_STORE_PASSWORD"
+    SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStore=$SOLR_SSL_CLIENT_KEY_STORE"
+
+    if [ -n "$SOLR_SSL_CLIENT_KEY_STORE_PASSWORD" ]; then
+      SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStorePassword=$SOLR_SSL_CLIENT_KEY_STORE_PASSWORD"
+    fi
+    if [ -n "$SOLR_SSL_CLIENT_KEY_STORE_TYPE" ]; then
+      SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStoreType=$SOLR_SSL_CLIENT_KEY_STORE_TYPE"
+    fi
+  else
+    if [ -n "$SOLR_SSL_KEY_STORE" ]; then
+      SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStore=$SOLR_SSL_KEY_STORE"
+    fi
+    if [ -n "$SOLR_SSL_KEY_STORE_PASSWORD" ]; then
+      SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStorePassword=$SOLR_SSL_KEY_STORE_PASSWORD"
+    fi
+    if [ -n "$SOLR_SSL_KEY_STORE_TYPE" ]; then
+      SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStoreType=$SOLR_SSL_KEYSTORE_TYPE"
+    fi
+  fi
+
+  if [ -n "$SOLR_SSL_CLIENT_TRUST_STORE" ]; then
+    SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStore=$SOLR_SSL_CLIENT_TRUST_STORE"
+
+    if [ -n "$SOLR_SSL_CLIENT_TRUST_STORE_PASSWORD" ]; then
+      SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStorePassword=$SOLR_SSL_CLIENT_TRUST_STORE_PASSWORD"
+    fi
+
+    if [ -n "$SOLR_SSL_CLIENT_TRUST_STORE_TYPE" ]; then
+      SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStoreType=$SOLR_SSL_CLIENT_TRUST_STORE_TYPE"
+    fi
   else
-    SOLR_SSL_OPTS+=" -Djavax.net.ssl.keyStore=$SOLR_SSL_KEY_STORE \
-      -Djavax.net.ssl.keyStorePassword=$SOLR_SSL_KEY_STORE_PASSWORD \
-      -Djavax.net.ssl.trustStore=$SOLR_SSL_TRUST_STORE \
-      -Djavax.net.ssl.trustStorePassword=$SOLR_SSL_TRUST_STORE_PASSWORD"
+    if [ -n "$SOLR_SSL_TRUST_STORE" ]; then
+      SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStore=$SOLR_SSL_TRUST_STORE"
+    fi
+
+    if [ -n "$SOLR_SSL_TRUST_STORE_PASSWORD" ]; then
+      SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStorePassword=$SOLR_SSL_TRUST_STORE_PASSWORD"
+    fi
+
+    if [ -n "$SOLR_SSL_TRUST_STORE_TYPE" ]; then
+      SOLR_SSL_OPTS+=" -Djavax.net.ssl.trustStoreType=$SOLR_SSL_TRUST_STORE_TYPE"
+    fi
   fi
 else
   SOLR_JETTY_CONFIG+=("--module=http")
 fi
 
 # Authentication options
+if [ -z "$SOLR_AUTH_TYPE" ] && [ -n "$SOLR_AUTHENTICATION_OPTS" ]; then
+  echo "WARNING: SOLR_AUTHENTICATION_OPTS environment variable configured without associated SOLR_AUTH_TYPE variable"
+  echo "         Please configure SOLR_AUTH_TYPE environment variable with the authentication type to be used."
+  echo "         Currently supported authentication types are [kerberos, basic]"
+fi
+
+if [ -n "$SOLR_AUTH_TYPE" ] && [ -n "$SOLR_AUTHENTICATION_CLIENT_CONFIGURER" ]; then
+  echo "WARNING: SOLR_AUTHENTICATION_CLIENT_CONFIGURER and SOLR_AUTH_TYPE environment variables are configured together."
+  echo "         Use SOLR_AUTH_TYPE environment variable to configure authentication type to be used. "
+  echo "         Currently supported authentication types are [kerberos, basic]"
+  echo "         The value of SOLR_AUTHENTICATION_CLIENT_CONFIGURER environment variable will be ignored"
+fi
+
+if [ -n "$SOLR_AUTH_TYPE" ]; then
+  case "$(echo $SOLR_AUTH_TYPE | awk '{print tolower($0)}')" in
+    basic)
+      SOLR_AUTHENTICATION_CLIENT_CONFIGURER="org.apache.solr.client.solrj.impl.PreemptiveBasicAuthConfigurer"
+      ;;
+    kerberos)
+      SOLR_AUTHENTICATION_CLIENT_CONFIGURER="org.apache.solr.client.solrj.impl.Krb5HttpClientConfigurer"
+      ;;
+    *)
+      echo "ERROR: Value specified for SOLR_AUTH_TYPE environment variable is invalid."
+      exit 1
+   esac
+fi
+
 if [ "$SOLR_AUTHENTICATION_CLIENT_CONFIGURER" != "" ]; then
-  AUTHC_CLIENT_CONFIGURER_ARG="-Dsolr.authentication.httpclient.configurer=$SOLR_AUTHENTICATION_CLIENT_CONFIGURER"
+  AUTHC_CLIENT_CONFIGURER_ARG="-Dsolr.httpclient.builder.factory=$SOLR_AUTHENTICATION_CLIENT_CONFIGURER"
 fi
 AUTHC_OPTS="$AUTHC_CLIENT_CONFIGURER_ARG $SOLR_AUTHENTICATION_OPTS"
 
@@ -179,7 +290,7 @@ function print_usage() {
   if [ -z "$CMD" ]; then
     echo ""
     echo "Usage: solr COMMAND OPTIONS"
-    echo "       where COMMAND is one of: start, stop, restart, status, healthcheck, create, create_core, create_collection, delete, version, zk"
+    echo "       where COMMAND is one of: start, stop, restart, status, healthcheck, create, create_core, create_collection, delete, version, zk, auth"
     echo ""
     echo "  Standalone server example (start Solr running in the background on port 8984):"
     echo ""
@@ -206,7 +317,7 @@ function print_usage() {
     echo ""
     echo "  -p <port>     Specify the port to start the Solr HTTP listener on; default is 8983"
     echo "                  The specified port (SOLR_PORT) will also be used to determine the stop port"
-    echo "                  STOP_PORT=(\$SOLR_PORT-1000) and JMX RMI listen port RMI_PORT=(1\$SOLR_PORT). "
+    echo "                  STOP_PORT=(\$SOLR_PORT-1000) and JMX RMI listen port RMI_PORT=(\$SOLR_PORT+10000). "
     echo "                  For instance, if you set -p 8985, then the STOP_PORT=7985 and RMI_PORT=18985"
     echo ""
     echo "  -d <dir>      Specify the Solr server directory; defaults to server"
@@ -237,7 +348,9 @@ function print_usage() {
     echo ""
     echo "  -noprompt     Don't prompt for input; accept all defaults when running examples that accept user input"
     echo ""
-    echo "  -V            Verbose messages from this script"
+    echo "  -v and -q     Verbose (-v) or quiet (-q) logging. Sets default log level to DEBUG or WARN instead of INFO"
+    echo ""
+    echo "  -V or -verbose Verbose messages from this script"
     echo ""
   elif [ "$CMD" == "stop" ]; then
     echo ""
@@ -354,28 +467,132 @@ function print_usage() {
     echo "                            Solr instance and will use the port of the first server it finds."
     echo ""
   elif [ "$CMD" == "zk" ]; then
-    echo "Usage: solr zk [-upconfig|-downconfig] [-d confdir] [-n configName] [-z zkHost]"
+    print_short_zk_usage ""
+    echo "         Be sure to check the Solr logs in case of errors."
+    echo ""
+    echo "             -z zkHost Optional Zookeeper connection string for all commands. If specified it"
+    echo "                        overrides the 'ZK_HOST=...'' defined in solr.in.sh."
+    echo ""
+    echo "         upconfig uploads a configset from the local machine to Zookeeper. (Backcompat: -upconfig)"
+    echo ""
+    echo "         downconfig downloads a configset from Zookeeper to the local machine. (Backcompat: -downconfig)"
+    echo ""
+    echo "             -n configName   Name of the configset in Zookeeper that will be the destination of"
+    echo "                             'upconfig' and the source for 'downconfig'."
+    echo ""
+    echo "             -d confdir      The local directory the configuration will be uploaded from for"
+    echo "                             'upconfig' or downloaded to for 'downconfig'. If 'confdir' is a child of"
+    echo "                             ...solr/server/solr/configsets' then the configs will be copied from/to"
+    echo "                             that directory. Otherwise it is interpreted as a simple local path."
+    echo ""
+    echo "         cp copies files or folders to/from Zookeeper or Zookeeper -> Zookeeper"
+    echo "             -r   Recursively copy <src> to <dst>. Command will fail if <src> has children and "
+    echo "                        -r is not specified. Optional"
+    echo ""
+    echo "             <src>, <dest> : [file:][/]path/to/local/file or zk:/path/to/zk/node"
+    echo "                             NOTE: <src> and <dest> may both be Zookeeper resources prefixed by 'zk:'"
+    echo "             When <src> is a zk resource, <dest> may be '.'"
+    echo "             If <dest> ends with '/', then <dest> will be a local folder or parent znode and the last"
+    echo "             element of the <src> path will be appended unless <src> also ends in a slash. "
+    echo "             <dest> may be zk:, which may be useful when using the cp -r form to backup/restore "
+    echo "             the entire zk state."
+    echo "             You must enclose local paths that end in a wildcard in quotes or just"
+    echo "             end the local path in a slash. That is,"
+    echo "             'bin/solr zk cp -r /some/dir/ zk:/ -z localhost:2181' is equivalent to"
+    echo "             'bin/solr zk cp -r \"/some/dir/*\" zk:/ -z localhost:2181'"
+    echo "             but 'bin/solr zk cp -r /some/dir/* zk:/ -z localhost:2181' will throw an error"
+    echo ""
+    echo "             here's an example of backup/restore for a ZK configuration:"
+    echo "             to copy to local: 'bin/solr zk cp -r zk:/ /some/dir -z localhost:2181'"
+    echo "             to restore to ZK: 'bin/solr zk cp -r /some/dir/ zk:/ -z localhost:2181'"
+    echo ""
+    echo "             The 'file:' prefix is stripped, thus 'file:/wherever' specifies an absolute local path and"
+    echo "             'file:somewhere' specifies a relative local path. All paths on Zookeeper are absolute."
+    echo ""
+    echo "             Zookeeper nodes CAN have data, so moving a single file to a parent znode"
+    echo "             will overlay the data on the parent Znode so specifying the trailing slash"
+    echo "             can be important."
+    echo ""
+    echo "             Wildcards are supported when copying from local, trailing only and must be quoted."
+    echo ""
+    echo "         rm deletes files or folders on Zookeeper"
+    echo "             -r     Recursively delete if <path> is a directory. Command will fail if <path>"
+    echo "                    has children and -r is not specified. Optional"
+    echo "             <path> : [zk:]/path/to/zk/node. <path> may not be the root ('/')"
+    echo ""
+    echo "         mv moves (renames) znodes on Zookeeper"
+    echo "             <src>, <dest> : Zookeeper nodes, the 'zk:' prefix is optional."
+    echo "             If <dest> ends with '/', then <dest> will be a parent znode"
+    echo "             and the last element of the <src> path will be appended."
+    echo "             Zookeeper nodes CAN have data, so moving a single file to a parent znode"
+    echo "             will overlay the data on the parent Znode so specifying the trailing slash"
+    echo "             is important."
+    echo ""
+    echo "         ls lists the znodes on Zookeeper"
+    echo "             -r recursively descends the path listing all znodes. Optional"
+    echo "             <path>: The Zookeeper path to use as the root."
+    echo ""
+    echo "             Only the node names are listed, not data"
+    echo ""
+    echo "         mkroot makes a znode on Zookeeper with no data. Can be used to make a path of arbitrary"
+    echo "             depth but primarily intended to create a 'chroot'."
     echo ""
-    echo "     -upconfig to move a configset from the local machine to Zookeeper."
+    echo "             <path>: The Zookeeper path to create. Leading slash is assumed if not present."
+    echo "                     Intermediate nodes are created as needed if not present."
     echo ""
-    echo "     -downconfig to move a configset from Zookeeper to the local machine."
+  elif [ "$CMD" == "auth" ]; then
     echo ""
-    echo "     -n configName    Name of the configset in Zookeeper that will be the destinatino of"
-    echo "                       'upconfig' and the source for 'downconfig'."
+    echo "Usage: solr auth enable [-type basicAuth] -credentials user:pass [-blockUnknown <true|false>] [-updateIncludeFileOnly <true|false>]"
+    echo "       solr auth enable [-type basicAuth] -prompt <true|false> [-blockUnknown <true|false>] [-updateIncludeFileOnly <true|false>]"
+    echo "       solr auth disable [-updateIncludeFileOnly <true|false>]"
     echo ""
-    echo "     -d confdir       The local directory the configuration will be uploaded from for"
-    echo "                      'upconfig' or downloaded to for 'downconfig'. For 'upconfig', this"
-    echo "                      can be one of the example configsets, basic_configs, data_driven_schema_configs or"
-    echo "                      sample_techproducts_configs or an arbitrary directory."
+    echo "  -type <type>                           The authentication mechanism to enable. Defaults to 'basicAuth'."
     echo ""
-    echo "     -z zkHost        Zookeeper connection string."
+    echo "  -credentials <user:pass>               The username and password of the initial user"
+    echo "                                         Note: only one of -prompt or -credentials must be provided"
     echo ""
-    echo "  NOTE: Solr must have been started least once (or have it running) before using this command."
-    echo "        This initialized Zookeeper for Solr"
+    echo "  -prompt <true|false>                   Prompts the user to provide the credentials"
+    echo "                                         Note: only one of -prompt or -credentials must be provided"
+    echo ""
+    echo "  -blockUnknown <true|false>             When true, this blocks out access to unauthenticated users. When not provided,"
+    echo "                                         this defaults to false (i.e. unauthenticated users can access all endpoints, except the"
+    echo "                                         operations like collection-edit, security-edit, core-admin-edit etc.). Check the reference"
+    echo "                                         guide for Basic Authentication for more details."
+    echo ""
+    echo "  -updateIncludeFileOnly <true|false>    Only update the solr.in.sh or solr.in.cmd file, and skip actual enabling/disabling"
+    echo "                                         authentication (i.e. don't update security.json)"
+    echo ""
+    echo "  -z zkHost                              Zookeeper connection string"
+    echo ""
+    echo "  -d <dir>                               Specify the Solr server directory"
+    echo ""
+    echo "  -s <dir>                               Specify the Solr home directory. This is where any credentials or authentication"
+    echo "                                         configuration files (e.g. basicAuth.conf) would be placed."
     echo ""
   fi
 } # end print_usage
 
+function print_short_zk_usage() {
+
+  if [ "$1" != "" ]; then
+    echo -e "\nERROR: $1\n"
+  fi
+
+  echo "  Usage: solr zk upconfig|downconfig -d <confdir> -n <configName> [-z zkHost]"
+  echo "         solr zk cp [-r] <src> <dest> [-z zkHost]"
+  echo "         solr zk rm [-r] <path> [-z zkHost]"
+  echo "         solr zk mv <src> <dest> [-z zkHost]"
+  echo "         solr zk ls [-r] <path> [-z zkHost]"
+  echo "         solr zk mkroot <path> [-z zkHost]"
+  echo ""
+
+  if [ "$1" == "" ]; then
+    echo "Type bin/solr zk -help for full usage help"
+  else
+    exit 1
+  fi
+}
+
 # used to show the script is still alive when waiting on work to complete
 function spinner() {
   local pid=$1
@@ -407,7 +624,7 @@ function solr_pid_by_port() {
 # extract the value of the -Djetty.port parameter from a running Solr process 
 function jetty_port() {
   SOLR_PID="$1"
-  SOLR_PROC=`ps auxww | grep -w $SOLR_PID | grep start\.jar | grep jetty.port`
+  SOLR_PROC=`ps auxww | grep -w $SOLR_PID | grep start\.jar | grep jetty\.port`
   IFS=' ' read -a proc_args <<< "$SOLR_PROC"
   for arg in "${proc_args[@]}"
     do
@@ -455,10 +672,10 @@ function get_info() {
     done < <(find "$SOLR_PID_DIR" -name "solr-*.pid" -type f)
   else
     # no pid files but check using ps just to be sure
-    numSolrs=`ps auxww | grep start\.jar | grep solr.solr.home | grep -v grep | wc -l | sed -e 's/^[ \t]*//'`
+    numSolrs=`ps auxww | grep start\.jar | grep solr\.solr\.home | grep -v grep | wc -l | sed -e 's/^[ \t]*//'`
     if [ "$numSolrs" != "0" ]; then
       echo -e "\nFound $numSolrs Solr nodes: "
-      PROCESSES=$(ps auxww | grep start\.jar | grep solr.solr.home | grep -v grep | awk '{print $2}' | sort -r)
+      PROCESSES=$(ps auxww | grep start\.jar | grep solr\.solr\.home | grep -v grep | awk '{print $2}' | sort -r)
       for ID in $PROCESSES
         do
           port=`jetty_port "$ID"`
@@ -490,9 +707,24 @@ function stop_solr() {
   SOLR_PID="$4"
 
   if [ "$SOLR_PID" != "" ]; then
-    echo -e "Sending stop command to Solr running on port $SOLR_PORT ... waiting 5 seconds to allow Jetty process $SOLR_PID to stop gracefully."
+    echo -e "Sending stop command to Solr running on port $SOLR_PORT ... waiting up to $SOLR_STOP_WAIT seconds to allow Jetty process $SOLR_PID to stop gracefully."
     "$JAVA" $SOLR_SSL_OPTS $AUTHC_OPTS -jar "$DIR/start.jar" "STOP.PORT=$STOP_PORT" "STOP.KEY=$STOP_KEY" --stop || true
-    (sleep 5) &
+      (loops=0
+      while true
+      do
+        CHECK_PID=`ps auxww | awk '{print $2}' | grep -w $SOLR_PID | sort -r | tr -d ' '`
+        if [ "$CHECK_PID" != "" ]; then
+          slept=$((loops * 2))
+          if [ $slept -lt $SOLR_STOP_WAIT ]; then
+            sleep 2
+            loops=$[$loops+1]
+          else
+            exit # subshell!
+          fi
+        else
+          exit # subshell!
+        fi
+      done) &
     spinner $!
     rm -f "$SOLR_PID_DIR/solr-$SOLR_PORT.pid"
   else
@@ -555,6 +787,12 @@ if [ "$SCRIPT_CMD" == "status" ]; then
   exit
 fi
 
+# assert tool
+if [ "$SCRIPT_CMD" == "assert" ]; then
+  run_tool assert $*
+  exit $?
+fi
+
 # run a healthcheck and exit if requested
 if [ "$SCRIPT_CMD" == "healthcheck" ]; then
 
@@ -571,7 +809,7 @@ if [ "$SCRIPT_CMD" == "healthcheck" ]; then
           ;;
           -z|-zkhost)          
               if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
-                print_usage "$SCRIPT_CMD" "ZooKeepeer connection string is required when using the $1 option!"
+                print_usage "$SCRIPT_CMD" "ZooKeeper connection string is required when using the $1 option!"
                 exit 1
               fi
               ZK_HOST="$2"
@@ -617,6 +855,7 @@ if [[ "$SCRIPT_CMD" == "create" || "$SCRIPT_CMD" == "create_core" || "$SCRIPT_CM
 
   CREATE_NUM_SHARDS=1
   CREATE_REPFACT=1
+  FORCE=false
 
   if [ $# -gt 0 ]; then
     while true; do
@@ -669,6 +908,10 @@ if [[ "$SCRIPT_CMD" == "create" || "$SCRIPT_CMD" == "create_core" || "$SCRIPT_CM
               CREATE_PORT="$2"
               shift 2
           ;;
+          -force)
+              FORCE=true
+              shift
+          ;;
           -help|-usage)
               print_usage "$SCRIPT_CMD"
               exit 0
@@ -726,6 +969,11 @@ if [[ "$SCRIPT_CMD" == "create" || "$SCRIPT_CMD" == "create_core" || "$SCRIPT_CM
     exit 1
   fi
 
+  if [[ "$(whoami)" == "root" ]] && [[ "$FORCE" == "false" ]] ; then
+    echo "WARNING: Creating cores as the root user can cause Solr to fail and is not advisable. Exiting."
+    echo "         If you started Solr as root (not advisable either), force core creation by adding argument -force"
+    exit 1
+  fi
   if [ "$SCRIPT_CMD" == "create_core" ]; then
     run_tool create_core -name "$CREATE_NAME" -solrUrl "$SOLR_URL_SCHEME://$SOLR_TOOL_HOST:$CREATE_PORT/solr" \
       -confdir "$CREATE_CONFDIR" -configsetsDir "$SOLR_TIP/server/solr/configsets"
@@ -821,105 +1069,285 @@ if [[ "$SCRIPT_CMD" == "delete" ]]; then
   exit $?
 fi
 
-# Upload or download a configset to Zookeeper
+ZK_RECURSE=false
+# Zookeeper file maintenance (upconfig, downconfig, files up/down etc.)
+# It's a little clumsy to have the parsing go round and round for upconfig and downconfig, but that's
+# necessary for back-compat
 if [[ "$SCRIPT_CMD" == "zk" ]]; then
 
   if [ $# -gt 0 ]; then
     while true; do
       case "$1" in
-          -z|-zkhost)          
-              if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
-                print_usage "$SCRIPT_CMD" "ZooKeepeer connection string is required when using the $1 option!"
-                exit 1
-              fi
-              ZK_HOST="$2"
-              shift 2
-          ;;
-          -n|-confname)
-              if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
-                print_usage "$SCRIPT_CMD" "Configuration name is required when using the $1 option!"
-                exit 1
-              fi
-              CONFIGSET_CONFNAME="$2"
-              shift 2
-          ;;
-          -d|-confdir)
-              if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
-                print_usage "$SCRIPT_CMD" "Configuration directory is required when using the $1 option!"
-                exit 1
-              fi
-              CONFIGSET_CONFDIR="$2"
-              shift 2
-          ;;
-          -upconfig)
-              ZK_OP="upconfig"
-              shift 1
-          ;;
-          -downconfig)
-              ZK_OP="downconfig"
-              shift 1
-          ;;
-          -help|-usage|-h)
-              print_usage "$SCRIPT_CMD"
-              exit 0
-          ;;
-          --)
-              shift
-              break
-          ;;
-          *)
-              if [ "$1" != "" ]; then
-                print_usage "$SCRIPT_CMD" "Unrecognized or misplaced argument: $1!"
-                exit 1
+        -upconfig|upconfig|-downconfig|downconfig|cp|rm|mv|ls|mkroot)
+            if [ "${1:0:1}" == "-" ]; then
+              ZK_OP=${1:1}
+            else
+              ZK_OP=$1
+            fi
+            shift 1
+        ;;
+        -z|-zkhost)
+            if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+              print_short_zk_usage "$SCRIPT_CMD" "ZooKeeper connection string is required when using the $1 option!"
+            fi
+            ZK_HOST="$2"
+            shift 2
+        ;;
+        -n|-confname)
+            if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+              print_short_zk_usage "$SCRIPT_CMD" "Configuration name is required when using the $1 option!"
+            fi
+            CONFIGSET_CONFNAME="$2"
+            shift 2
+        ;;
+        -d|-confdir)
+            if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+              print_short_zk_usage "$SCRIPT_CMD" "Configuration directory is required when using the $1 option!"
+            fi
+            CONFIGSET_CONFDIR="$2"
+            shift 2
+        ;;
+        -r)
+            ZK_RECURSE="true"
+            shift
+        ;;
+        -help|-usage|-h)
+            print_usage "$SCRIPT_CMD"
+            exit 0
+        ;;
+        --)
+            shift
+            break
+        ;;
+        *)  # Pick up <src> <dst> or <path> params for rm, ls, cp, mv, mkroot.
+            if [ "$1" == "" ]; then
+              break # out-of-args, stop looping
+            fi
+            if [ -z "$ZK_SRC" ]; then
+              ZK_SRC=$1
+            else
+              if [ -z "$ZK_DST" ]; then
+                ZK_DST=$1
               else
-                break # out-of-args, stop looping
+                print_short_zk_usage "Unrecognized or misplaced command $1. 'cp' with trailing asterisk requires quoting, see help text."
               fi
-          ;;
+            fi
+            shift
+        ;;
       esac
     done
   fi
 
   if [ -z "$ZK_OP" ]; then
-    echo "Zookeeper operation (one of '-upconfig' or  '-downconfig') is required!"
-    print_usage "$SCRIPT_CMD"
-    exit 1
+    print_short_zk_usage "Zookeeper operation (one of 'upconfig', 'downconfig', 'rm', 'mv', 'cp', 'ls', 'mkroot') is required!"
   fi
 
   if [ -z "$ZK_HOST" ]; then
-    echo "Zookeeper address (-z) argument is required!"
-    print_usage "$SCRIPT_CMD"
-    exit 1
+    print_short_zk_usage "Zookeeper address (-z) argument is required or ZK_HOST must be specified in the solr.in.sh file."
   fi
 
-  if [ -z "$CONFIGSET_CONFDIR" ]; then
-    echo "Local directory of the configset (-d) argument is required!"
-    print_usage "$SCRIPT_CMD"
-    exit 1
+  if [[ "$ZK_OP" == "upconfig" ||  "$ZK_OP" == "downconfig" ]]; then
+    if [ -z "$CONFIGSET_CONFDIR" ]; then
+      print_short_zk_usage "Local directory of the configset (-d) argument is required!"
+    fi
+
+    if [ -z "$CONFIGSET_CONFNAME" ]; then
+      print_short_zk_usage "Configset name on Zookeeper (-n) argument is required!"
+    fi
   fi
 
-  if [ -z "$CONFIGSET_CONFNAME" ]; then
-    echo "Configset name on Zookeeper (-n) argument is required!"
-    print_usage "$SCRIPT_CMD"
-    exit 1
+  if [[ "$ZK_OP" == "cp" || "$ZK_OP" == "mv" ]]; then
+    if [[ -z "$ZK_SRC" || -z "$ZK_DST" ]]; then
+      print_short_zk_usage "<source> and <destination> must be specified when using either the 'mv' or 'cp' commands."
+    fi
+    if [[ "$ZK_OP" == "cp" && "${ZK_SRC:0:3}" != "zk:" && "${ZK_DST:0:3}" != "zk:" ]]; then
+      print_short_zk_usage "One of the source or destination paths must be prefixed by 'zk:' for the 'cp' command."
+    fi
   fi
 
-  if [ "$ZK_OP" == "upconfig" ]; then
-    run_tool "$ZK_OP" -confname "$CONFIGSET_CONFNAME" -confdir "$CONFIGSET_CONFDIR" -zkHost "$ZK_HOST" -configsetsDir "$SOLR_TIP/server/solr/configsets"
+  if [[ "$ZK_OP" == "mkroot" ]]; then
+    if [[ -z "$ZK_SRC" ]]; then
+      print_short_zk_usage "<path> must be specified when using the 'mkroot' command."
+    fi
+  fi
+
+
+  case "$ZK_OP" in
+    upconfig)
+      run_tool "$ZK_OP" -confname "$CONFIGSET_CONFNAME" -confdir "$CONFIGSET_CONFDIR" -zkHost "$ZK_HOST" -configsetsDir "$SOLR_TIP/server/solr/configsets"
+    ;;
+    downconfig)
+      run_tool "$ZK_OP" -confname "$CONFIGSET_CONFNAME" -confdir "$CONFIGSET_CONFDIR" -zkHost "$ZK_HOST"
+    ;;
+    rm)
+      if [ -z "$ZK_SRC" ]; then
+        print_short_zk_usage "Zookeeper path to remove must be specified when using the 'rm' command"
+      fi
+      run_tool "$ZK_OP" -path "$ZK_SRC" -zkHost "$ZK_HOST" -recurse "$ZK_RECURSE"
+    ;;
+    mv)
+      run_tool "$ZK_OP" -src "$ZK_SRC" -dst "$ZK_DST" -zkHost "$ZK_HOST"
+    ;;
+    cp)
+      run_tool "$ZK_OP" -src "$ZK_SRC" -dst "$ZK_DST" -zkHost "$ZK_HOST" -recurse "$ZK_RECURSE"
+    ;;
+    ls)
+      if [ -z "$ZK_SRC" ]; then
+        print_short_zk_usage "Zookeeper path to list must be specified when using the 'ls' command"
+      fi
+      run_tool "$ZK_OP" -path "$ZK_SRC" -recurse "$ZK_RECURSE" -zkHost "$ZK_HOST"
+    ;;
+    mkroot)
+      if [ -z "$ZK_SRC" ]; then
+        print_short_zk_usage "Zookeeper path to create must be specified when using the 'mkroot' command"
+      fi
+      run_tool "$ZK_OP" -path "$ZK_SRC" -zkHost "$ZK_HOST"
+    ;;
+    *)
+      print_short_zk_usage "Unrecognized Zookeeper operation $ZK_OP"
+    ;;
+  esac
+
+  exit $?
+fi
+
+if [[ "$SCRIPT_CMD" == "auth" ]]; then
+  declare -a AUTH_PARAMS
+  if [ $# -gt 0 ]; then
+    while true; do
+      case "$1" in
+        enable|disable)
+            AUTH_OP=$1
+            AUTH_PARAMS=("${AUTH_PARAMS[@]}" "$AUTH_OP")
+            shift
+        ;;
+        -z|-zkhost|zkHost)
+            ZK_HOST="$2"
+            AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-zkHost" "$ZK_HOST")
+            shift 2
+        ;;
+        -t|-type)
+            AUTH_TYPE="$2"
+            AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-type" "$AUTH_TYPE")
+            shift 2
+        ;;
+        -credentials)
+            AUTH_CREDENTIALS="$2"
+            AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-credentials" "$AUTH_CREDENTIALS")
+            shift 2
+        ;;
+        -solrIncludeFile)
+            SOLR_INCLUDE="$2"
+            shift 2
+        ;;
+        -prompt)
+            AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-prompt" "$2")
+            shift
+        ;;
+        -blockUnknown)
+            AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-blockUnknown" "$2")
+            shift
+            break
+        ;;
+        -updateIncludeFileOnly)
+            AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-updateIncludeFileOnly" "$2")
+            shift
+            break
+        ;;
+        -d|-dir)
+            if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+              print_usage "$SCRIPT_CMD" "Server directory is required when using the $1 option!"
+              exit 1
+            fi
+
+            if [[ "$2" == "." || "$2" == "./" || "$2" == ".." || "$2" == "../" ]]; then
+              SOLR_SERVER_DIR="$(pwd)/$2"
+            else
+              # see if the arg value is relative to the tip vs full path
+              if [[ "$2" != /* ]] && [[ -d "$SOLR_TIP/$2" ]]; then
+                SOLR_SERVER_DIR="$SOLR_TIP/$2"
+              else
+                SOLR_SERVER_DIR="$2"
+              fi
+            fi
+            # resolve it to an absolute path
+            SOLR_SERVER_DIR="$(cd "$SOLR_SERVER_DIR"; pwd)"
+            shift 2
+        ;;
+        -s|-solr.home)
+            if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+              print_usage "$SCRIPT_CMD" "Solr home directory is required when using the $1 option!"
+              exit 1
+            fi
+
+            SOLR_HOME="$2"
+            shift 2
+        ;;
+        -help|-usage|-h)
+            print_usage "$SCRIPT_CMD"
+            exit 0
+        ;;
+        --)
+            shift
+            break
+        ;;
+        *)
+            shift
+            break
+        ;;
+      esac
+    done
+  fi
+
+  if [ -z "$SOLR_SERVER_DIR" ]; then
+    SOLR_SERVER_DIR="$DEFAULT_SERVER_DIR"
+  fi
+  if [ ! -e "$SOLR_SERVER_DIR" ]; then
+    echo -e "\nSolr server directory $SOLR_SERVER_DIR not found!\n"
+    exit 1
+  fi
+  if [ -z "$SOLR_HOME" ]; then
+    SOLR_HOME="$SOLR_SERVER_DIR/solr"
   else
-    run_tool "$ZK_OP" -confname "$CONFIGSET_CONFNAME" -confdir "$CONFIGSET_CONFDIR" -zkHost "$ZK_HOST"
+    if [[ $SOLR_HOME != /* ]] && [[ -d "$SOLR_SERVER_DIR/$SOLR_HOME" ]]; then
+      SOLR_HOME="$SOLR_SERVER_DIR/$SOLR_HOME"
+      SOLR_PID_DIR="$SOLR_HOME"
+    elif [[ $SOLR_HOME != /* ]] && [[ -d "`pwd`/$SOLR_HOME" ]]; then
+      SOLR_HOME="$(pwd)/$SOLR_HOME"
+    fi
   fi
 
+  if [ -z "$AUTH_OP" ]; then
+    print_usage "$SCRIPT_CMD"
+    exit 0
+  fi
+
+  AUTH_PARAMS=("${AUTH_PARAMS[@]}" "-solrIncludeFile" "$SOLR_INCLUDE")
+
+  if [ -z "$AUTH_PORT" ]; then
+    for ID in `ps auxww | grep java | grep start\.jar | awk '{print $2}' | sort -r`
+      do
+        port=`jetty_port "$ID"`
+        if [ "$port" != "" ]; then
+          AUTH_PORT=$port
+          break
+        fi
+      done
+  fi
+  run_tool auth ${AUTH_PARAMS[@]} -solrUrl "$SOLR_URL_SCHEME://$SOLR_TOOL_HOST:$AUTH_PORT/solr" -authConfDir "$SOLR_HOME"
   exit $?
 fi
 
+
 # verify the command given is supported
-if [ "$SCRIPT_CMD" != "stop" ] && [ "$SCRIPT_CMD" != "start" ] && [ "$SCRIPT_CMD" != "restart" ] && [ "$SCRIPT_CMD" != "status" ]; then
+if [ "$SCRIPT_CMD" != "stop" ] && [ "$SCRIPT_CMD" != "start" ] && [ "$SCRIPT_CMD" != "restart" ] && [ "$SCRIPT_CMD" != "status" ] && [ "$SCRIPT_CMD" != "assert" ]; then
   print_usage "" "$SCRIPT_CMD is not a valid command!"
   exit 1
 fi
 
 # Run in foreground (default is to run in the background)
 FG="false"
+FORCE=false
 noprompt=false
 SOLR_OPTS=($SOLR_OPTS)
 PASS_TO_RUN_EXAMPLE=
@@ -1033,10 +1461,22 @@ if [ $# -gt 0 ]; then
             PASS_TO_RUN_EXAMPLE+=" --verbose"
             shift
         ;;
+        -v)
+            SOLR_LOG_LEVEL=DEBUG
+            shift
+        ;;
+        -q)
+            SOLR_LOG_LEVEL=WARN
+            shift
+        ;;
         -all)
             stop_all=true
             shift
         ;;
+        -force)
+            FORCE=true
+            shift
+        ;;
         --)
             shift
             break
@@ -1060,6 +1500,10 @@ if [ $# -gt 0 ]; then
   done
 fi
 
+if [[ $SOLR_LOG_LEVEL ]] ; then
+  SOLR_LOG_LEVEL_OPT="-Dsolr.log.level=$SOLR_LOG_LEVEL"
+fi
+
 if [ -z "$SOLR_SERVER_DIR" ]; then
   SOLR_SERVER_DIR="$DEFAULT_SERVER_DIR"
 fi
@@ -1157,13 +1601,21 @@ if [ -z "$STOP_PORT" ]; then
   STOP_PORT=`expr $SOLR_PORT - 1000`
 fi
 
+if [ "$SCRIPT_CMD" == "start" ] || [ "$SCRIPT_CMD" == "restart" ] ; then
+  if [[ "$(whoami)" == "root" ]] && [[ "$FORCE" == "false" ]] ; then
+    echo "WARNING: Starting Solr as the root user is a security risk and not considered best practice. Exiting."
+    echo "         Please consult the Reference Guide. To override this check, start with argument '-force'"
+    exit 1
+  fi
+fi
+
 if [[ "$SCRIPT_CMD" == "start" ]]; then
   # see if Solr is already running
   SOLR_PID=`solr_pid_by_port "$SOLR_PORT"`
 
   if [ -z "$SOLR_PID" ]; then
     # not found using the pid file ... but use ps to ensure not found
-    SOLR_PID=`ps auxww | grep start\.jar | grep -w $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+    SOLR_PID=`ps auxww | grep start\.jar | grep -w "\-Djetty\.port=$SOLR_PORT" | grep -v grep | awk '{print $2}' | sort -r`
   fi
 
   if [ "$SOLR_PID" != "" ]; then
@@ -1176,7 +1628,7 @@ else
   SOLR_PID=`solr_pid_by_port "$SOLR_PORT"`
   if [ -z "$SOLR_PID" ]; then
     # not found using the pid file ... but use ps to ensure not found
-    SOLR_PID=`ps auxww | grep start\.jar | grep -w $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+    SOLR_PID=`ps auxww | grep start\.jar | grep -w "\-Djetty\.port=$SOLR_PORT" | grep -v grep | awk '{print $2}' | sort -r`
   fi
   if [ "$SOLR_PID" != "" ]; then
     stop_solr "$SOLR_SERVER_DIR" "$SOLR_PORT" "$STOP_KEY" "$SOLR_PID"
@@ -1226,38 +1678,49 @@ if [ ! -e "$SOLR_HOME" ]; then
   echo -e "\nSolr home directory $SOLR_HOME not found!\n"
   exit 1
 fi
-
-# backup the log files before starting
-if [ -f "$SOLR_LOGS_DIR/solr.log" ]; then
-  if $verbose ; then
-    echo "Backing up $SOLR_LOGS_DIR/solr.log"
-  fi
-  mv "$SOLR_LOGS_DIR/solr.log" "$SOLR_LOGS_DIR/solr_log_$(date +"%Y%m%d_%H%M")"
+if $verbose ; then
+  q=""
+else
+  q="-q"
 fi
-
-if [ -f "$SOLR_LOGS_DIR/solr_gc.log" ]; then
-  if $verbose ; then
-    echo "Backing up $SOLR_LOGS_DIR/solr_gc.log"
-  fi
-  mv "$SOLR_LOGS_DIR/solr_gc.log" "$SOLR_LOGS_DIR/solr_gc_log_$(date +"%Y%m%d_%H%M")"
+if [ "${SOLR_LOG_PRESTART_ROTATION:=true}" == "true" ]; then
+  run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" $q -remove_old_solr_logs 7 || echo "Failed removing old solr logs"
+  run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" $q -archive_gc_logs $q     || echo "Failed archiving old GC logs"
+  run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" $q -archive_console_logs   || echo "Failed archiving old console logs"
+  run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" $q -rotate_solr_logs 9     || echo "Failed rotating old solr logs"
 fi
 
-java_ver_out=`echo "$("$JAVA" -version 2>&1)"`
-JAVA_VERSION=`echo $java_ver_out | grep "java version" | awk '{ print substr($3, 2, length($3)-2); }'`
-JAVA_VENDOR="Oracle"
-if [ "`echo $java_ver_out | grep -i "IBM J9"`" != "" ]; then
-  JAVA_VENDOR="IBM J9"
+# Establish default GC logging opts if no env var set (otherwise init to sensible default)
+if [ -z ${GC_LOG_OPTS+x} ]; then
+  if [[ "$JAVA_VER_NUM" < "9" ]] ; then
+    GC_LOG_OPTS=('-verbose:gc' '-XX:+PrintHeapAtGC' '-XX:+PrintGCDetails' \
+                 '-XX:+PrintGCDateStamps' '-XX:+PrintGCTimeStamps' '-XX:+PrintTenuringDistribution' \
+                 '-XX:+PrintGCApplicationStoppedTime')
+  else
+    GC_LOG_OPTS=('-Xlog:gc*')
+  fi
+else
+  GC_LOG_OPTS=($GC_LOG_OPTS)
 fi
 
-# if verbose gc logging enabled, setup the location of the log file
+# if verbose gc logging enabled, setup the location of the log file and rotation
 if [ "$GC_LOG_OPTS" != "" ]; then
-  gc_log_flag="-Xloggc"
-  if [ "$JAVA_VENDOR" == "IBM J9" ]; then
-    gc_log_flag="-Xverbosegclog"
+  if [[ "$JAVA_VER_NUM" < "9" ]] ; then
+    gc_log_flag="-Xloggc"
+    if [ "$JAVA_VENDOR" == "IBM J9" ]; then
+      gc_log_flag="-Xverbosegclog"
+    fi
+    GC_LOG_OPTS+=("$gc_log_flag:$SOLR_LOGS_DIR/solr_gc.log" '-XX:+UseGCLogFileRotation' '-XX:NumberOfGCLogFiles=9' '-XX:GCLogFileSize=20M')
+  else
+    # http://openjdk.java.net/jeps/158
+    for i in "${!GC_LOG_OPTS[@]}";
+    do
+      # for simplicity, we only look at the prefix '-Xlog:gc'
+      # (if 'all' or multiple tags are used starting with anything other than 'gc' the user is on their own)
+      # if a single additional ':' exists in param, then there is already an explicit output specifier
+      GC_LOG_OPTS[$i]=$(echo ${GC_LOG_OPTS[$i]} | sed "s|^\(-Xlog:gc[^:]*$\)|\1:file=$SOLR_LOGS_DIR/solr_gc.log:time,uptime:filecount=9,filesize=20000|")
+    done
   fi
-  GC_LOG_OPTS=($GC_LOG_OPTS "$gc_log_flag:$SOLR_LOGS_DIR/solr_gc.log")
-else
-  GC_LOG_OPTS=()
 fi
 
 # If ZK_HOST is defined, the assume SolrCloud mode
@@ -1298,7 +1761,11 @@ fi
 if [ "$ENABLE_REMOTE_JMX_OPTS" == "true" ]; then
 
   if [ -z "$RMI_PORT" ]; then
-    RMI_PORT="1$SOLR_PORT"
+    RMI_PORT=`expr $SOLR_PORT + 10000`
+    if [ $RMI_PORT -gt 65535 ]; then
+      echo -e "\nRMI_PORT is $RMI_PORT, which is invalid!\n"
+      exit 1
+    fi
   fi
 
   REMOTE_JMX_OPTS=('-Dcom.sun.management.jmxremote' \
@@ -1324,6 +1791,12 @@ else
   JAVA_MEM_OPTS=("-Xms$SOLR_HEAP" "-Xmx$SOLR_HEAP")
 fi
 
+# Pick default for Java thread stack size, and then add to SOLR_OPTS
+if [ -z ${SOLR_JAVA_STACK_SIZE+x} ]; then
+  SOLR_JAVA_STACK_SIZE='-Xss256k'
+fi
+SOLR_OPTS+=($SOLR_JAVA_STACK_SIZE)
+
 if [ -z "$SOLR_TIMEZONE" ]; then
   SOLR_TIMEZONE='UTC'
 fi
@@ -1336,20 +1809,28 @@ function launch_solr() {
   
   SOLR_ADDL_ARGS="$2"
 
-  GC_TUNE=($GC_TUNE)
-  # deal with Java version specific GC and other flags
-  if [ "${JAVA_VERSION:0:3}" == "1.7" ]; then
-    # Specific Java version hacking
-    GC_TUNE+=('-XX:CMSFullGCsBeforeCompaction=1' '-XX:CMSTriggerPermRatio=80')
-    if [ "$JAVA_VENDOR" != "IBM J9" ]; then
-      JAVA_MINOR_VERSION=${JAVA_VERSION:(-2)}
-      if [[ $JAVA_MINOR_VERSION -ge 40 && $JAVA_MINOR_VERSION -le 51 ]]; then
-        GC_TUNE+=('-XX:-UseSuperWord')
-        echo -e "\nWARNING: Java version $JAVA_VERSION has known bugs with Lucene and requires the -XX:-UseSuperWord flag. Please consider upgrading your JVM.\n"
-      fi
-    fi
+  # define default GC_TUNE
+  if [ -z ${GC_TUNE+x} ]; then
+      GC_TUNE=('-XX:NewRatio=3' \
+        '-XX:SurvivorRatio=4' \
+        '-XX:TargetSurvivorRatio=90' \
+        '-XX:MaxTenuringThreshold=8' \
+        '-XX:+UseConcMarkSweepGC' \
+        '-XX:+UseParNewGC' \
+        '-XX:ConcGCThreads=4' '-XX:ParallelGCThreads=4' \
+        '-XX:+CMSScavengeBeforeRemark' \
+        '-XX:PretenureSizeThreshold=64m' \
+        '-XX:+UseCMSInitiatingOccupancyOnly' \
+        '-XX:CMSInitiatingOccupancyFraction=50' \
+        '-XX:CMSMaxAbortablePrecleanTime=6000' \
+        '-XX:+CMSParallelRemarkEnabled' \
+        '-XX:+ParallelRefProcEnabled' \
+        '-XX:-OmitStackTraceInFastThrow')
+  else
+    GC_TUNE=($GC_TUNE)
   fi
 
+
   # If SSL-related system props are set, add them to SOLR_OPTS
   if [ -n "$SOLR_SSL_OPTS" ]; then
     # If using SSL and solr.jetty.https.port not set explicitly, use the jetty.port
@@ -1380,17 +1861,22 @@ function launch_solr() {
     fi
 
     if [ "$SOLR_OPTS" != "" ]; then
-      echo -e "    SOLR_OPTS        = ${SOLR_OPTS[@]}"
+      echo -e "    SOLR_OPTS       = ${SOLR_OPTS[@]}"
     fi
 
     if [ "$SOLR_ADDL_ARGS" != "" ]; then
-      echo -e "    SOLR_ADDL_ARGS   = $SOLR_ADDL_ARGS"
+      echo -e "    SOLR_ADDL_ARGS  = $SOLR_ADDL_ARGS"
     fi
 
     if [ "$ENABLE_REMOTE_JMX_OPTS" == "true" ]; then
       echo -e "    RMI_PORT        = $RMI_PORT"
       echo -e "    REMOTE_JMX_OPTS = ${REMOTE_JMX_OPTS[@]}"
     fi
+
+    if [ "$SOLR_LOG_LEVEL" != "" ]; then
+      echo -e "    SOLR_LOG_LEVEL  = $SOLR_LOG_LEVEL"
+    fi
+
     echo -e "\n"
   fi
     
@@ -1403,7 +1889,7 @@ function launch_solr() {
   fi
 
   SOLR_START_OPTS=('-server' "${JAVA_MEM_OPTS[@]}" "${GC_TUNE[@]}" "${GC_LOG_OPTS[@]}" \
-    "${REMOTE_JMX_OPTS[@]}" "${CLOUD_MODE_OPTS[@]}" \
+    "${REMOTE_JMX_OPTS[@]}" "${CLOUD_MODE_OPTS[@]}" $SOLR_LOG_LEVEL_OPT -Dsolr.log.dir="$SOLR_LOGS_DIR" \
     "-Djetty.port=$SOLR_PORT" "-DSTOP.PORT=$stop_port" "-DSTOP.KEY=$STOP_KEY" \
     "${SOLR_HOST_ARG[@]}" "-Duser.timezone=$SOLR_TIMEZONE" \
     "-Djetty.home=$SOLR_SERVER_DIR" "-Dsolr.solr.home=$SOLR_HOME" "-Dsolr.install.dir=$SOLR_TIP" \
@@ -1413,37 +1899,57 @@ function launch_solr() {
     IN_CLOUD_MODE=" in SolrCloud mode"
   fi
 
-  mkdir -p "$SOLR_LOGS_DIR"
+  mkdir -p "$SOLR_LOGS_DIR" 2>/dev/null
+  if [ $? -ne 0 ]; then
+    echo -e "\nERROR: Logs directory $SOLR_LOGS_DIR could not be created. Exiting"
+    exit 1
+  fi
+  if [ ! -w "$SOLR_LOGS_DIR" ]; then
+    echo -e "\nERROR: Logs directory $SOLR_LOGS_DIR is not writable. Exiting"
+    exit 1
+  fi
+  case "$SOLR_LOGS_DIR" in
+    contexts|etc|lib|modules|resources|scripts|solr|solr-webapp)
+      echo -e "\nERROR: Logs directory $SOLR_LOGS_DIR is invalid. Reserved for the system. Exiting"
+      exit 1
+      ;;
+  esac
 
   if [ "$run_in_foreground" == "true" ]; then
-    echo -e "\nStarting Solr$IN_CLOUD_MODE on port $SOLR_PORT from $SOLR_SERVER_DIR\n"
-    exec "$JAVA" "${SOLR_START_OPTS[@]}" $SOLR_ADDL_ARGS -jar start.jar "${SOLR_JETTY_CONFIG[@]}"
+    exec "$JAVA" "${SOLR_START_OPTS[@]}" $SOLR_ADDL_ARGS -Dsolr.kerberos.name.rules="$SOLR_KERB_NAME_RULES" -jar start.jar "${SOLR_JETTY_CONFIG[@]}"
   else
     # run Solr in the background
-    nohup "$JAVA" "${SOLR_START_OPTS[@]}" $SOLR_ADDL_ARGS -Dsolr.kerberos.name.rules="$SOLR_KERB_NAME_RULES" \
+    nohup "$JAVA" "${SOLR_START_OPTS[@]}" $SOLR_ADDL_ARGS -Dsolr.kerberos.name.rules="$SOLR_KERB_NAME_RULES" -Dsolr.log.muteconsole \
 	"-XX:OnOutOfMemoryError=$SOLR_TIP/bin/oom_solr.sh $SOLR_PORT $SOLR_LOGS_DIR" \
         -jar start.jar "${SOLR_JETTY_CONFIG[@]}" \
 	1>"$SOLR_LOGS_DIR/solr-$SOLR_PORT-console.log" 2>&1 & echo $! > "$SOLR_PID_DIR/solr-$SOLR_PORT.pid"
 
+    # check if /proc/sys/kernel/random/entropy_avail exists then check output of cat /proc/sys/kernel/random/entropy_avail to see if less than 300
+    if [[ -f /proc/sys/kernel/random/entropy_avail ]] && (( `cat /proc/sys/kernel/random/entropy_avail` < 300)); then
+	echo "Warning: Available entropy is low. As a result, use of the UUIDField, SSL, or any other features that require"
+	echo "RNG might not work properly. To check for the amount of available entropy, use 'cat /proc/sys/kernel/random/entropy_avail'."
+	echo ""
+    fi
     # no lsof on cygwin though
     if hash lsof 2>/dev/null ; then  # hash returns true if lsof is on the path
-      echo -n "Waiting up to 30 seconds to see Solr running on port $SOLR_PORT"
+      echo -n "Waiting up to $SOLR_STOP_WAIT seconds to see Solr running on port $SOLR_PORT"
       # Launch in a subshell to show the spinner
       (loops=0
       while true
       do
         running=`lsof -PniTCP:$SOLR_PORT -sTCP:LISTEN`
         if [ -z "$running" ]; then
-          if [ $loops -lt 6 ]; then
-            sleep 5
+	  slept=$((loops * 2))
+          if [ $slept -lt $SOLR_STOP_WAIT ]; then
+            sleep 2
             loops=$[$loops+1]
           else
-            echo -e "Still not seeing Solr listening on $SOLR_PORT after 30 seconds!"
+            echo -e "Still not seeing Solr listening on $SOLR_PORT after $SOLR_STOP_WAIT seconds!"
             tail -30 "$SOLR_LOGS_DIR/solr.log"
             exit # subshell!
           fi
         else
-          SOLR_PID=`ps auxww | grep start\.jar | grep -w $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+          SOLR_PID=`ps auxww | grep start\.jar | grep -w "\-Djetty\.port=$SOLR_PORT" | grep -v grep | awk '{print $2}' | sort -r`
           echo -e "\nStarted Solr server on port $SOLR_PORT (pid=$SOLR_PID). Happy searching!\n"
           exit # subshell!
         fi
@@ -1452,7 +1958,7 @@ function launch_solr() {
     else
       echo -e "NOTE: Please install lsof as this script needs it to determine if Solr is listening on port $SOLR_PORT."
       sleep 10
-      SOLR_PID=`ps auxww | grep start\.jar | grep -w $SOLR_PORT | grep -v grep | awk '{print $2}' | sort -r`
+      SOLR_PID=`ps auxww | grep start\.jar | grep -w "\-Djetty\.port=$SOLR_PORT" | grep -v grep | awk '{print $2}' | sort -r`
       echo -e "\nStarted Solr server on port $SOLR_PORT (pid=$SOLR_PID). Happy searching!\n"
       return;
     fi
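
Taken together, the SOLR_AUTH_TYPE block earlier in this script diff maps a friendly type name to a SolrJ configurer class and passes it through the renamed system property (solr.httpclient.builder.factory replaces solr.authentication.httpclient.configurer). A rough Python sketch of that mapping, shown only to summarize the behavior and not part of the patch:

# Mirrors the bin/solr case statement for SOLR_AUTH_TYPE; illustrative only.
AUTH_CONFIGURERS = {
  'basic': 'org.apache.solr.client.solrj.impl.PreemptiveBasicAuthConfigurer',
  'kerberos': 'org.apache.solr.client.solrj.impl.Krb5HttpClientConfigurer',
}

def client_configurer_arg(auth_type):
  """Return the JVM flag bin/solr would add for a given SOLR_AUTH_TYPE value."""
  configurer = AUTH_CONFIGURERS.get(auth_type.lower())
  if configurer is None:
    raise ValueError('unsupported SOLR_AUTH_TYPE: %s' % auth_type)
  return '-Dsolr.httpclient.builder.factory=%s' % configurer

print(client_configurer_arg('kerberos'))
# -Dsolr.httpclient.builder.factory=org.apache.solr.client.solrj.impl.Krb5HttpClientConfigurer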

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java
index e3a1e79..70bc232 100644
--- a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java
+++ b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudCLI.java
@@ -52,12 +52,14 @@ public class AmbariSolrCloudCLI {
   private static final String UNSECURE_ZNODE_COMMAND = "unsecure-znode";
   private static final String SECURE_SOLR_ZNODE_COMMAND = "secure-solr-znode";
   private static final String SECURITY_JSON_LOCATION = "security-json-location";
+  private static final String REMOVE_ADMIN_HANDLERS = "remove-admin-handlers";
   private static final String CMD_LINE_SYNTAX =
     "\n./solrCloudCli.sh --create-collection -z host1:2181,host2:2181/ambari-solr -c collection -cs conf_set"
       + "\n./solrCloudCli.sh --upload-config -z host1:2181,host2:2181/ambari-solr -d /tmp/myconfig_dir -cs config_set"
       + "\n./solrCloudCli.sh --download-config -z host1:2181,host2:2181/ambari-solr -cs config_set -d /tmp/myonfig_dir"
       + "\n./solrCloudCli.sh --check-config -z host1:2181,host2:2181/ambari-solr -cs config_set"
       + "\n./solrCloudCli.sh --create-shard -z host1:2181,host2:2181/ambari-solr -c collection -sn myshard"
+      + "\n./solrCloudCli.sh --remove-admin-handlers -z host1:2181,host2:2181/ambari-solr -c collection"
       + "\n./solrCloudCli.sh --create-znode -z host1:2181,host2:2181 -zn /ambari-solr"
       + "\n./solrCloudCli.sh --check-znode -z host1:2181,host2:2181 -zn /ambari-solr"
       + "\n./solrCloudCli.sh --cluster-prop -z host1:2181,host2:2181/ambari-solr -cpn urlScheme -cpn http"
@@ -137,6 +139,11 @@ public class AmbariSolrCloudCLI {
       .desc("Disable security for znode")
       .build();
 
+    final Option removeAdminHandlerOption = Option.builder("rah")
+      .longOpt(REMOVE_ADMIN_HANDLERS)
+      .desc("Remove AdminHandlers request handler from solrconfig.xml")
+      .build();
+
     final Option shardNameOption = Option.builder("sn")
       .longOpt("shard-name")
       .desc("Name of the shard for create-shard command")
@@ -328,6 +335,7 @@ public class AmbariSolrCloudCLI {
 
     options.addOption(helpOption);
     options.addOption(retryOption);
+    options.addOption(removeAdminHandlerOption);
     options.addOption(intervalOption);
     options.addOption(zkConnectStringOption);
     options.addOption(configSetOption);
@@ -414,6 +422,9 @@ public class AmbariSolrCloudCLI {
       } else if (cli.hasOption("uz")) {
         command = UNSECURE_ZNODE_COMMAND;
         validateRequiredOptions(cli, command, zkConnectStringOption, znodeOption, jaasFileOption);
+      } else if (cli.hasOption("rah")) {
+        command = REMOVE_ADMIN_HANDLERS;
+        validateRequiredOptions(cli, command, zkConnectStringOption, collectionOption);
       } else {
         List<String> commands = Arrays.asList(CREATE_COLLECTION_COMMAND, CREATE_SHARD_COMMAND, UPLOAD_CONFIG_COMMAND,
           DOWNLOAD_CONFIG_COMMAND, CONFIG_CHECK_COMMAND, SET_CLUSTER_PROP, CREATE_ZNODE, SECURE_ZNODE_COMMAND, UNSECURE_ZNODE_COMMAND,
@@ -539,6 +550,9 @@ public class AmbariSolrCloudCLI {
         case SECURE_SOLR_ZNODE_COMMAND:
           solrCloudClient = clientBuilder.build();
           solrCloudClient.secureSolrZnode();
+          break;
+        case REMOVE_ADMIN_HANDLERS:
+          solrCloudClient = clientBuilder.build();
+          solrCloudClient.removeAdminHandlerFromCollectionConfig();
           break;
         default:
           throw new AmbariSolrCloudClientException(String.format("Not found command: '%s'", command));

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
index 9479679..96c07a3 100644
--- a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
+++ b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
@@ -27,6 +27,7 @@ import org.apache.ambari.infra.solr.commands.EnableKerberosPluginSolrZkCommand;
 import org.apache.ambari.infra.solr.commands.GetShardsCommand;
 import org.apache.ambari.infra.solr.commands.GetSolrHostsCommand;
 import org.apache.ambari.infra.solr.commands.ListCollectionCommand;
+import org.apache.ambari.infra.solr.commands.RemoveAdminHandlersCommand;
 import org.apache.ambari.infra.solr.commands.SecureSolrZNodeZkCommand;
 import org.apache.ambari.infra.solr.commands.SecureZNodeZkCommand;
 import org.apache.ambari.infra.solr.commands.SetClusterPropertyZkCommand;
@@ -257,6 +258,13 @@ public class AmbariSolrCloudClient {
     return new GetSolrHostsCommand(getRetryTimes(), getInterval()).run(this);
   }
 
+  /**
+   * Remove solr.admin.AdminHandlers requestHandler from solrconfig.xml
+   */
+  public boolean removeAdminHandlerFromCollectionConfig() throws Exception {
+    return new RemoveAdminHandlersCommand(getRetryTimes(), getInterval()).run(this);
+  }
+
   public String getZkConnectString() {
     return zkConnectString;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/RemoveAdminHandlersCommand.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/RemoveAdminHandlersCommand.java b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/RemoveAdminHandlersCommand.java
new file mode 100644
index 0000000..32fae7b
--- /dev/null
+++ b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/RemoveAdminHandlersCommand.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.solr.commands;
+
+import org.apache.ambari.infra.solr.AmbariSolrCloudClient;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.SolrZooKeeper;
+import org.apache.zookeeper.data.Stat;
+
+public class RemoveAdminHandlersCommand extends AbstractZookeeperRetryCommand<Boolean> {
+
+  public RemoveAdminHandlersCommand(int maxRetries, int interval) {
+    super(maxRetries, interval);
+  }
+
+  @Override
+  protected Boolean executeZkCommand(AmbariSolrCloudClient client, SolrZkClient zkClient, SolrZooKeeper solrZooKeeper) throws Exception {
+    String solrConfigXmlPath = String.format("/configs/%s/solrconfig.xml", client.getCollection());
+    if (zkClient.exists(solrConfigXmlPath, true)) {
+      Stat stat = new Stat();
+      byte[] solrConfigXmlBytes = zkClient.getData(solrConfigXmlPath, null, stat, true);
+      String solrConfigStr = new String(solrConfigXmlBytes);
+      if (solrConfigStr.contains("class=\"solr.admin.AdminHandlers\"")) {
+        byte[] newSolrConfigXmlBytes = new String(solrConfigXmlBytes).replaceAll("(?s)<requestHandler name=\"/admin/\".*?class=\"solr.admin.AdminHandlers\" />", "").getBytes();
+        zkClient.setData(solrConfigXmlPath, newSolrConfigXmlBytes, stat.getVersion(), true);
+      }
+    }
+    return true;
+  }
+}
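
To make the znode rewrite above concrete: the replaceAll strips the whole multi-line /admin/ handler declaration thanks to the (?s) DOTALL flag. A hypothetical Python equivalent for illustration only (the XML fragment is a made-up two-line sample shaped like the Log Search configsets changed further below):

import re

# (?s) lets '.' match across the newline, like Pattern.DOTALL in the Java above.
solrconfig = (
  '  <requestHandler name="/admin/" \n'
  '                  class="solr.admin.AdminHandlers" />\n'
  '  <requestHandler name="/select" class="solr.SearchHandler" />\n')

cleaned = re.sub(
  r'(?s)<requestHandler name="/admin/".*?class="solr.admin.AdminHandlers" />',
  '', solrconfig)

print(cleaned)  # only the /select handler line survives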

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java
index 34597c6..e79773e 100644
--- a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java
+++ b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/commands/SetClusterPropertyZkCommand.java
@@ -19,9 +19,9 @@
 package org.apache.ambari.infra.solr.commands;
 
 import org.apache.ambari.infra.solr.AmbariSolrCloudClient;
+import org.apache.solr.common.cloud.ClusterProperties;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.SolrZooKeeper;
-import org.apache.solr.common.cloud.ZkStateReader;
 
 public class SetClusterPropertyZkCommand extends AbstractZookeeperRetryCommand<String>{
 
@@ -33,8 +33,8 @@ public class SetClusterPropertyZkCommand extends AbstractZookeeperRetryCommand<S
   protected String executeZkCommand(AmbariSolrCloudClient client, SolrZkClient zkClient, SolrZooKeeper solrZooKeeper) throws Exception {
     String propertyName = client.getPropName();
     String propertyValue = client.getPropValue();
-    ZkStateReader reader = new ZkStateReader(zkClient);
-    reader.setClusterProperty(propertyName, propertyValue);
+    ClusterProperties clusterProperties = new ClusterProperties(zkClient);
+    clusterProperties.setClusterProperty(propertyName, propertyValue);
     return propertyValue;
   }
 }
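Solr 6 moved cluster-property writes from ZkStateReader to the dedicated ClusterProperties helper, which is why this command is rewritten as part of the 6.6.0 upgrade. A minimal usage sketch of the new call shown above (the ZooKeeper address, timeout and property value are placeholders for illustration; in Ambari they come from AmbariSolrCloudClient):

import java.io.IOException;

import org.apache.solr.common.cloud.ClusterProperties;
import org.apache.solr.common.cloud.SolrZkClient;

public class SetClusterPropertySketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical ZooKeeper connect string and client timeout.
    SolrZkClient zkClient = new SolrZkClient("localhost:9983", 15000);
    try {
      // Writes the property into the cluster properties znode, e.g. forcing https URLs,
      // mirroring what the zkcli "clusterprop" command does in docker/bin/start.sh.
      new ClusterProperties(zkClient).setClusterProperty("urlScheme", "https");
    } finally {
      zkClient.close();
    }
  }
}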

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java b/ambari-infra/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java
index ee84969..f1f842d 100644
--- a/ambari-infra/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java
+++ b/ambari-infra/ambari-infra-solr-plugin/src/test/java/org/apache/ambari/infra/security/InfraRuleBasedAuthorizationPluginTest.java
@@ -242,6 +242,11 @@ public class InfraRuleBasedAuthorizationPluginTest {
     public String getResource() {
       return (String) values.get("resource");
     }
+
+    @Override
+    public Object getHandler() {
+      return null;
+    }
   }
 
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-infra/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/pom.xml b/ambari-infra/pom.xml
index 4f5c29c..908abb4 100644
--- a/ambari-infra/pom.xml
+++ b/ambari-infra/pom.xml
@@ -31,7 +31,7 @@
 
   <properties>
     <jdk.version>1.8</jdk.version>
-    <solr.version>5.5.2</solr.version>
+    <solr.version>6.6.0</solr.version>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <python.ver>python &gt;= 2.6</python.ver>
     <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
index 7af91df..b1290a4 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
@@ -1063,8 +1063,7 @@
        Admin Handlers - This will register all the standard admin
        RequestHandlers.  
     -->
-  <requestHandler name="/admin/" 
-                  class="solr.admin.AdminHandlers" />
+
   <!-- This single handler is equivalent to the following... -->
   <!--
      <requestHandler name="/admin/luke"       class="solr.admin.LukeRequestHandler" />

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
index 59f778f..f0e46a0 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
@@ -1063,8 +1063,7 @@
        Admin Handlers - This will register all the standard admin
        RequestHandlers.  
     -->
-  <requestHandler name="/admin/" 
-                  class="solr.admin.AdminHandlers" />
+
   <!-- This single handler is equivalent to the following... -->
   <!--
      <requestHandler name="/admin/luke"       class="solr.admin.LukeRequestHandler" />

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
index 8244a08..1827444 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
@@ -1063,8 +1063,7 @@
        Admin Handlers - This will register all the standard admin
        RequestHandlers.  
     -->
-  <requestHandler name="/admin/" 
-                  class="solr.admin.AdminHandlers" />
+
   <!-- This single handler is equivalent to the following... -->
   <!--
      <requestHandler name="/admin/luke"       class="solr.admin.LukeRequestHandler" />

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/SolrSchemaFieldDao.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/SolrSchemaFieldDao.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/SolrSchemaFieldDao.java
index d99694b..71f9f29 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/SolrSchemaFieldDao.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/SolrSchemaFieldDao.java
@@ -141,7 +141,7 @@ public class SolrSchemaFieldDao {
         try (CloseableHttpClient httpClient = HttpClientUtil.createClient(null)) {
           HttpGet request = new HttpGet(replica.getCoreUrl() + LUKE_REQUEST_URL_SUFFIX);
           HttpResponse response = httpClient.execute(request);
-          NamedList<Object> lukeData = (NamedList<Object>) new JavaBinCodec(null, null).unmarshal(response.getEntity().getContent());
+          NamedList<Object> lukeData = (NamedList<Object>) new JavaBinCodec().unmarshal(response.getEntity().getContent());
           LukeResponse lukeResponse = new LukeResponse();
           lukeResponse.setResponse(lukeData);
           lukeResponses.add(lukeResponse);

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/docker/Dockerfile
----------------------------------------------------------------------
diff --git a/ambari-logsearch/docker/Dockerfile b/ambari-logsearch/docker/Dockerfile
index 2b8fd5d..1e4135e 100644
--- a/ambari-logsearch/docker/Dockerfile
+++ b/ambari-logsearch/docker/Dockerfile
@@ -60,7 +60,7 @@ RUN npm install -g npm@2.1.11
 RUN npm install -g brunch@1.7.20
 
 # Install Solr
-ENV SOLR_VERSION 5.5.2
+ENV SOLR_VERSION 6.6.0
 RUN wget --no-check-certificate -O /root/solr-$SOLR_VERSION.tgz http://public-repo-1.hortonworks.com/ARTIFACTS/dist/lucene/solr/$SOLR_VERSION/solr-$SOLR_VERSION.tgz
 RUN cd /root && tar -zxvf /root/solr-$SOLR_VERSION.tgz
 ADD bin/start.sh /root/start.sh

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/docker/bin/start.sh
----------------------------------------------------------------------
diff --git a/ambari-logsearch/docker/bin/start.sh b/ambari-logsearch/docker/bin/start.sh
index 28ebf65..c4ee06c 100644
--- a/ambari-logsearch/docker/bin/start.sh
+++ b/ambari-logsearch/docker/bin/start.sh
@@ -70,7 +70,7 @@ function generate_keys() {
 
 function start_solr() {
   echo "Starting Solr..."
-  /root/solr-$SOLR_VERSION/bin/solr start -cloud -s /root/logsearch_solr_index/data -verbose
+  /root/solr-$SOLR_VERSION/bin/solr start -cloud -s /root/logsearch_solr_index/data -verbose -force
   touch /var/log/ambari-logsearch-solr/solr.log
 
   if [ $LOGSEARCH_SOLR_SSL_ENABLED == 'true'  ]
@@ -78,7 +78,7 @@ function start_solr() {
     echo "Setting urlScheme as https and restarting solr..."
     $ZKCLI -zkhost localhost:9983 -cmd clusterprop -name urlScheme -val https
     /root/solr-$SOLR_VERSION/bin/solr stop
-    /root/solr-$SOLR_VERSION/bin/solr start -cloud -s /root/logsearch_solr_index/data -verbose
+    /root/solr-$SOLR_VERSION/bin/solr start -cloud -s /root/logsearch_solr_index/data -verbose -force
   fi
 }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-logsearch/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/pom.xml b/ambari-logsearch/pom.xml
index 82943e4..2be11ee 100644
--- a/ambari-logsearch/pom.xml
+++ b/ambari-logsearch/pom.xml
@@ -45,7 +45,7 @@
     <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>
     <deb.architecture>amd64</deb.architecture>
     <deb.dependency.list>${deb.python.ver}</deb.dependency.list>
-    <solr.version>5.5.2</solr.version>
+    <solr.version>6.6.0</solr.version>
     <hadoop.version>2.7.2</hadoop.version>
     <common.io.version>2.5</common.io.version>
   </properties>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
index 5fdc885..b4502d6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
@@ -369,6 +369,24 @@ public class UpgradeCatalog300 extends AbstractUpgradeCatalog {
               updateConfigurationPropertiesForCluster(cluster, "logsearch-log4j", Collections.singletonMap("content", content), true, true);
             }
           }
+
+          Config logsearchServiceLogsConfig = cluster.getDesiredConfigByType("logsearch-service_logs-solrconfig");
+          if (logsearchServiceLogsConfig != null) {
+            String content = logsearchServiceLogsConfig.getProperties().get("content");
+            if (content.contains("class=\"solr.admin.AdminHandlers\"")) {
+              content = content.replaceAll("(?s)<requestHandler name=\"/admin/\".*?class=\"solr.admin.AdminHandlers\" />", "");
+              updateConfigurationPropertiesForCluster(cluster, "logsearch-service_logs-solrconfig", Collections.singletonMap("content", content), true, true);
+            }
+          }
+
+          Config logsearchAuditLogsConfig = cluster.getDesiredConfigByType("logsearch-audit_logs-solrconfig");
+          if (logsearchAuditLogsConfig != null) {
+            String content = logsearchAuditLogsConfig.getProperties().get("content");
+            if (content.contains("class=\"solr.admin.AdminHandlers\"")) {
+              content = content.replaceAll("(?s)<requestHandler name=\"/admin/\".*?class=\"solr.admin.AdminHandlers\" />", "");
+              updateConfigurationPropertiesForCluster(cluster, "logsearch-audit_logs-solrconfig", Collections.singletonMap("content", content), true, true);
+            }
+          }
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
index 5f547f3..e4ea885 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
@@ -80,6 +80,9 @@ solr_client_dir = '/usr/lib/ambari-infra-solr-client'
 solr_bindir = solr_dir + '/bin'
 cloud_scripts = solr_dir + '/server/scripts/cloud-scripts'
 
+logsearch_hosts = default("/clusterHostInfo/logsearch_server_hosts", [])
+has_logsearch = len(logsearch_hosts) > 0
+
 if "infra-solr-env" in config['configurations']:
   infra_solr_hosts = config['clusterHostInfo']['infra_solr_hosts']
   infra_solr_znode = config['configurations']['infra-solr-env']['infra_solr_znode']

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
index f3dbcf3..7427584 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
@@ -85,6 +85,11 @@ def setup_infra_solr(name = None):
 
     create_ambari_solr_znode()
 
+    if params.has_logsearch:
+      cleanup_logsearch_collections(params.logsearch_service_logs_collection, jaas_file)
+      cleanup_logsearch_collections(params.logsearch_audit_logs_collection, jaas_file)
+      cleanup_logsearch_collections('history', jaas_file)
+
     security_json_file_location = custom_security_json_location \
       if params.infra_solr_security_json_content and str(params.infra_solr_security_json_content).strip() \
       else format("{infra_solr_conf}/security.json") # security.json file to upload
@@ -141,4 +146,14 @@ def create_ambari_solr_znode():
     zookeeper_quorum=params.zookeeper_quorum,
     solr_znode=params.infra_solr_znode,
     java64_home=params.java64_home,
-    retry=30, interval=5)
\ No newline at end of file
+    retry=30, interval=5)
+
+def cleanup_logsearch_collections(collection, jaas_file):
+  import params
+  solr_cloud_util.remove_admin_handlers(
+    zookeeper_quorum=params.zookeeper_quorum,
+    solr_znode=params.infra_solr_znode,
+    java64_home=params.java64_home,
+    jaas_file=jaas_file,
+    collection=collection
+  )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/audit_logs-solrconfig.xml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/audit_logs-solrconfig.xml.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/audit_logs-solrconfig.xml.j2
index 63879e7..d56990a 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/audit_logs-solrconfig.xml.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/audit_logs-solrconfig.xml.j2
@@ -1063,8 +1063,7 @@ this file, see http://wiki.apache.org/solr/SolrConfigXml.
   Admin Handlers - This will register all the standard admin
   RequestHandlers.
   -->
-  <requestHandler name="/admin/"
-                  class="solr.admin.AdminHandlers"/>
+
   <!-- This single handler is equivalent to the following... -->
   <!--
   <requestHandler name="/admin/luke"       class="solr.admin.LukeRequestHandler" />

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/service_logs-solrconfig.xml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/service_logs-solrconfig.xml.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/service_logs-solrconfig.xml.j2
index b6a4d1d..ed80e84 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/service_logs-solrconfig.xml.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/service_logs-solrconfig.xml.j2
@@ -1063,8 +1063,7 @@ this file, see http://wiki.apache.org/solr/SolrConfigXml.
   Admin Handlers - This will register all the standard admin
   RequestHandlers.
   -->
-  <requestHandler name="/admin/"
-                  class="solr.admin.AdminHandlers"/>
+
   <!-- This single handler is equivalent to the following... -->
   <!--
   <requestHandler name="/admin/luke"       class="solr.admin.LukeRequestHandler" />

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
index a342baa..d7bdf75 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
@@ -351,11 +351,38 @@ public class UpgradeCatalog300Test {
     expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logSearchLog4jCapture), anyString(),
         EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
 
+    Map<String, String> oldLogSearchServiceLogsConf = ImmutableMap.of(
+      "content", "<before/><requestHandler name=\"/admin/\"   class=\"solr.admin.AdminHandlers\" /><after/>");
+
+    Map<String, String> expectedLogSearchServiceLogsConf = ImmutableMap.of(
+      "content", "<before/><after/>");
+
+    Config confLogSearchServiceLogsConf = easyMockSupport.createNiceMock(Config.class);
+    expect(cluster.getDesiredConfigByType("logsearch-service_logs-solrconfig")).andReturn(confLogSearchServiceLogsConf).atLeastOnce();
+    expect(confLogSearchServiceLogsConf.getProperties()).andReturn(oldLogSearchServiceLogsConf).anyTimes();
+    Capture<Map<String, String>> logSearchServiceLogsConfCapture = EasyMock.newCapture();
+    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logSearchServiceLogsConfCapture), anyString(),
+      EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
+
+    Map<String, String> oldLogSearchAuditLogsConf = ImmutableMap.of(
+      "content", "<before/><requestHandler name=\"/admin/\"   class=\"solr.admin.AdminHandlers\" /><after/>");
+
+    Map<String, String> expectedLogSearchAuditLogsConf = ImmutableMap.of(
+      "content", "<before/><after/>");
+
+    Config confLogSearchAuditLogsConf = easyMockSupport.createNiceMock(Config.class);
+    expect(cluster.getDesiredConfigByType("logsearch-audit_logs-solrconfig")).andReturn(confLogSearchAuditLogsConf).atLeastOnce();
+    expect(confLogSearchAuditLogsConf.getProperties()).andReturn(oldLogSearchAuditLogsConf).anyTimes();
+    Capture<Map<String, String>> logSearchAuditLogsConfCapture = EasyMock.newCapture();
+    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logSearchAuditLogsConfCapture), anyString(),
+      EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
+
     replay(clusters, cluster);
     replay(controller, injector2);
     replay(confSomethingElse1, confSomethingElse2, confLogSearchConf1, confLogSearchConf2);
     replay(logSearchPropertiesConf, logFeederPropertiesConf);
     replay(mockLogFeederLog4j, mockLogSearchLog4j);
+    replay(confLogSearchServiceLogsConf, confLogSearchAuditLogsConf);
     new UpgradeCatalog300(injector2).updateLogSearchConfigs();
     easyMockSupport.verifyAll();
 
@@ -376,5 +403,11 @@ public class UpgradeCatalog300Test {
 
     Map<String, String> updatedLogSearchLog4j = logSearchLog4jCapture.getValue();
     assertTrue(Maps.difference(expectedLogSearchLog4j, updatedLogSearchLog4j).areEqual());
+
+    Map<String, String> updatedServiceLogsConf = logSearchServiceLogsConfCapture.getValue();
+    assertTrue(Maps.difference(expectedLogSearchServiceLogsConf, updatedServiceLogsConf).areEqual());
+
+    Map<String, String> updatedAuditLogsConf = logSearchAuditLogsConfCapture.getValue();
+    assertTrue(Maps.difference(expectedLogSearchAuditLogsConf, updatedAuditLogsConf).areEqual());
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/a795f38c/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py b/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
index 87304cd..e1fa1d8 100644
--- a/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
+++ b/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
@@ -106,6 +106,9 @@ class TestInfraSolr(RMFTestCase):
                                 )
 
       self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --create-znode --retry 30 --interval 5')
+      self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --remove-admin-handlers --collection hadoop_logs --retry 5 --interval 10')
+      self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --remove-admin-handlers --collection audit_logs --retry 5 --interval 10')
+      self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --remove-admin-handlers --collection history --retry 5 --interval 10')
       self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --cluster-prop --property-name urlScheme --property-value http')
       self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --setup-kerberos-plugin')
 


[27/36] ambari git commit: AMBARI-21443. Start All service not getting invoked after regenerate keytabs (akovalenko)

Posted by lp...@apache.org.
AMBARI-21443. Start All service not getting invoked after regenerate keytabs (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0b397cdf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0b397cdf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0b397cdf

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 0b397cdff959e04947fd6b15ada7a7c6a06aa55b
Parents: 880853a
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Tue Jul 11 17:22:37 2017 +0300
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Tue Jul 11 19:39:22 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/controllers/main/service.js      | 13 +++++++++++-
 ambari-web/app/utils/ajax/ajax.js               | 22 ++++++++++++++++++++
 .../test/controllers/main/service_test.js       |  4 ++--
 3 files changed, 36 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0b397cdf/ambari-web/app/controllers/main/service.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service.js b/ambari-web/app/controllers/main/service.js
index eb9df0d..343105f 100644
--- a/ambari-web/app/controllers/main/service.js
+++ b/ambari-web/app/controllers/main/service.js
@@ -177,9 +177,20 @@ App.MainServiceController = Em.ArrayController.extend(App.SupportClientConfigsDo
   },
 
   /**
-   * Restart all services - stops all services, then starts them back
+   * Restart all services - restarts by sending one RESTART command
    */
   restartAllServices: function () {
+    App.ajax.send({
+      name: 'restart.allServices',
+      sender: this,
+      showLoadingPopup: true
+    });
+  },
+
+  /**
+   * Restart all services - stops all services, then starts them back
+   */
+  stopAndStartAllServices: function () {
     this.silentStopAllServices();
   },
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/0b397cdf/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index 5919091..d6e6dfa 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -2356,6 +2356,28 @@ var urls = {
     }
   },
 
+  'restart.allServices': {
+    'real': '/clusters/{clusterName}/requests',
+    'mock': '',
+    'format': function (data) {
+      return {
+        type: 'POST',
+        data: JSON.stringify({
+          "RequestInfo": {
+            "command": "RESTART",
+            "context": 'Restart all services',
+            "operation_level": 'host_component'
+          },
+          "Requests/resource_filters": [
+            {
+              "hosts_predicate": "HostRoles/cluster_name=" + data.clusterName
+            }
+          ]
+        })
+      }
+    }
+  },
+
   'restart.staleConfigs': {
     'real': "/clusters/{clusterName}/requests",
     'mock': "",
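The new restart.allServices entry translates into a single Ambari REST call. A minimal sketch of the equivalent request issued directly against the API (the server URL, credentials and cluster name below are placeholders; the /api/v1 prefix and the X-Requested-By header are what the web client normally supplies on its own):

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class RestartAllServicesSketch {
  public static void main(String[] args) throws Exception {
    String clusterName = "test_Cluster01"; // placeholder cluster name
    URL url = new URL("http://localhost:8080/api/v1/clusters/" + clusterName + "/requests");

    // Same payload the UI builds in restart.allServices above.
    String body = "{"
        + "\"RequestInfo\":{"
        +   "\"command\":\"RESTART\","
        +   "\"context\":\"Restart all services\","
        +   "\"operation_level\":\"host_component\"},"
        + "\"Requests/resource_filters\":[{"
        +   "\"hosts_predicate\":\"HostRoles/cluster_name=" + clusterName + "\"}]"
        + "}";

    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("POST");
    conn.setDoOutput(true);
    conn.setRequestProperty("X-Requested-By", "ambari"); // required by Ambari for POSTs
    conn.setRequestProperty("Authorization", "Basic "
        + Base64.getEncoder().encodeToString("admin:admin".getBytes(StandardCharsets.UTF_8)));
    try (OutputStream out = conn.getOutputStream()) {
      out.write(body.getBytes(StandardCharsets.UTF_8));
    }
    System.out.println("HTTP " + conn.getResponseCode()); // request is queued asynchronously
  }
}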

http://git-wip-us.apache.org/repos/asf/ambari/blob/0b397cdf/ambari-web/test/controllers/main/service_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service_test.js b/ambari-web/test/controllers/main/service_test.js
index 7ed7641..57a3eb4 100644
--- a/ambari-web/test/controllers/main/service_test.js
+++ b/ambari-web/test/controllers/main/service_test.js
@@ -433,7 +433,7 @@ describe('App.MainServiceController', function () {
 
   });
 
-  describe("#restartAllServices()", function() {
+  describe("#stopAndStartAllServices()", function() {
 
     beforeEach(function() {
       sinon.stub(mainServiceController, 'silentStopAllServices');
@@ -443,7 +443,7 @@ describe('App.MainServiceController', function () {
     });
 
     it("silentStopAllServices should be called", function() {
-      mainServiceController.restartAllServices();
+      mainServiceController.stopAndStartAllServices();
       expect(mainServiceController.silentStopAllServices.calledOnce).to.be.true;
     });
   });


[31/36] ambari git commit: AMBARI-21447 Log Feeder should support logs without date (time only) (mgergely)

Posted by lp...@apache.org.
AMBARI-21447 Log Feeder should support logs without date (time only) (mgergely)

Change-Id: I853447134873b10fdd3fd604fd84630a9caf9d03


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9f788c38
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9f788c38
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9f788c38

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 9f788c386667bfeb82fff7c35287a5fdb175c349
Parents: 31b9d77
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Wed Jul 12 16:55:48 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Wed Jul 12 16:55:48 2017 +0200

----------------------------------------------------------------------
 .../ambari/logfeeder/mapper/MapperDate.java     | 42 +++++++++++++-------
 1 file changed, 28 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9f788c38/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
index 305688b..e099161 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
@@ -19,6 +19,7 @@
 
 package org.apache.ambari.logfeeder.mapper;
 
+import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.Calendar;
 import java.util.Date;
@@ -79,20 +80,7 @@ public class MapperDate extends Mapper {
           jsonObj.put(LogFeederConstants.IN_MEMORY_TIMESTAMP, ((Date) value).getTime());
         } else if (targetDateFormatter != null) {
           if (srcDateFormatter != null) {
-            Date srcDate = srcDateFormatter.parse(value.toString());
-            //set year in src_date when src_date does not have year component
-            if (!srcDateFormatter.toPattern().contains("yy")) {
-              Calendar currentCalendar = Calendar.getInstance();
-              Calendar logDateCalendar = Calendar.getInstance();
-              logDateCalendar.setTimeInMillis(srcDate.getTime());
-              if (logDateCalendar.get(Calendar.MONTH) > currentCalendar.get(Calendar.MONTH)) {
-                // set previous year as a log year  when log month is grater than current month
-                srcDate = DateUtils.setYears(srcDate, currentCalendar.get(Calendar.YEAR) - 1);
-              } else {
-                // set current year as a log year
-                srcDate = DateUtils.setYears(srcDate, currentCalendar.get(Calendar.YEAR));
-              }
-            }
+            Date srcDate = getSourceDate(value);
             value = targetDateFormatter.format(srcDate);
             jsonObj.put(LogFeederConstants.IN_MEMORY_TIMESTAMP, srcDate.getTime());
           } else {
@@ -111,4 +99,30 @@ public class MapperDate extends Mapper {
     }
     return value;
   }
+
+  private Date getSourceDate(Object value) throws ParseException {
+    Date srcDate = srcDateFormatter.parse(value.toString());
+    
+    Calendar currentCalendar = Calendar.getInstance();
+    
+    if (!srcDateFormatter.toPattern().contains("dd")) {
+      //set year/month/date in src_date when src_date does not have date component
+      srcDate = DateUtils.setYears(srcDate, currentCalendar.get(Calendar.YEAR));
+      srcDate = DateUtils.setMonths(srcDate, currentCalendar.get(Calendar.MONTH));
+      srcDate = DateUtils.setDays(srcDate, currentCalendar.get(Calendar.DAY_OF_MONTH));
+      // if with the current date the time stamp is after the current one, it must be previous day
+      if (srcDate.getTime() > currentCalendar.getTimeInMillis()) {
+        srcDate = DateUtils.addDays(srcDate, -1);
+      }      
+    } else if (!srcDateFormatter.toPattern().contains("yy")) {
+      //set year in src_date when src_date does not have year component
+      srcDate = DateUtils.setYears(srcDate, currentCalendar.get(Calendar.YEAR));
+      // if with the current year the time stamp is after the current one, it must be previous year
+      if (srcDate.getTime() > currentCalendar.getTimeInMillis()) {
+        srcDate = DateUtils.addYears(srcDate, -1);
+      }
+    }
+    
+    return srcDate;
+  }
 }
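In effect, a time-only timestamp is now resolved against the current calendar day, falling back to the previous day if that would place it in the future (and, as before, year-less dates fall back to the previous year). A self-contained sketch of that rule for a time-only pattern (plain JDK only; the class name and sample input are illustrative, and Calendar is used here in place of the commons-lang DateUtils calls above):

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;

public class TimeOnlyDateSketch {
  public static void main(String[] args) throws ParseException {
    SimpleDateFormat src = new SimpleDateFormat("HH:mm:ss,SSS"); // no "dd" and no "yy" component
    Date parsed = src.parse("23:59:01,123");                     // date part defaults to Jan 1 1970

    Calendar now = Calendar.getInstance();
    Calendar log = Calendar.getInstance();
    log.setTime(parsed);
    // copy today's year/month/day onto the parsed time-of-day
    log.set(now.get(Calendar.YEAR), now.get(Calendar.MONTH), now.get(Calendar.DAY_OF_MONTH));
    // a timestamp later than "now" cannot be from today, so shift it back one day
    if (log.getTimeInMillis() > now.getTimeInMillis()) {
      log.add(Calendar.DAY_OF_MONTH, -1);
    }
    System.out.println("resolved timestamp: " + log.getTime());
  }
}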


[10/36] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by lp...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
index fa791c1..64e7d52 100644
--- a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
+++ b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
@@ -1,101 +1,101 @@
 {
     "localComponents": [
-        "SECONDARY_NAMENODE", 
-        "HDFS_CLIENT", 
-        "DATANODE", 
-        "NAMENODE", 
-        "RANGER_ADMIN", 
-        "RANGER_TAGSYNC", 
-        "RANGER_USERSYNC", 
-        "ZOOKEEPER_SERVER", 
-        "ZOOKEEPER_CLIENT", 
+        "SECONDARY_NAMENODE",
+        "HDFS_CLIENT",
+        "DATANODE",
+        "NAMENODE",
+        "RANGER_ADMIN",
+        "RANGER_TAGSYNC",
+        "RANGER_USERSYNC",
+        "ZOOKEEPER_SERVER",
+        "ZOOKEEPER_CLIENT",
         "KERBEROS_CLIENT",
         "LOGSEARCH_SOLR",
         "LOGSEARCH_SOLR_CLIENT"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
-        "kerberos-env": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
+        "kerberos-env": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
+        },
         "ranger-tagsync-site": {},
-        "ranger-tagsync-policymgr-ssl": {}, 
+        "ranger-tagsync-policymgr-ssl": {},
         "zoo.cfg": {},
         "hadoop-policy": {},
-        "hdfs-log4j": {}, 
-        "krb5-conf": {}, 
+        "hdfs-log4j": {},
+        "krb5-conf": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
         "ranger-solr-configuration": {},
         "usersync-properties": {},
         "zookeeper-env": {},
         "infra-solr-env": {},
         "infra-solr-client-log4j": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "41-2", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER", 
-    "role": "RANGER_ADMIN", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 41, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "41-2",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER",
+    "role": "RANGER_ADMIN",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 41,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "test_Cluster01", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 186, 
-    "roleParams": {}, 
+    },
+    "clusterName": "test_Cluster01",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 186,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1467016680635"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1467016680511"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1467016680537"
         },
@@ -104,52 +104,52 @@
         },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "kerberos-env": {
             "tag": "version1467016537243"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1467016680401"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1467016680586"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "krb5-conf": {
             "tag": "version1467016537243"
-        }, 
+        },
         "core-site": {
             "tag": "version1467016680612"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1467016680446"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
         },
@@ -158,10 +158,10 @@
         },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1467016680492"
         },
@@ -174,116 +174,116 @@
         "cluster-env": {
             "tag": "version1467016680567"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
         "package_version": "2_6_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
         "current_version": "2.6.0.0-801",
         "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
         "agent_stack_retry_count": "5",
         "stack_version": "2.6",
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
         "java_home": "/usr/jdk64/jdk1.7.0_45",
         "repository_version_id": "1",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
         "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.6\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.6.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
         "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
         "db_name": "ambari",
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "jce_policy-8.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "jce_policy-8.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_admin.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_admin.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
         "version": "2.6.0.0-801",
         "max_duration_for_retries": "0",
         "command_retry_enabled": "false",
-        "command_timeout": "600", 
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 2, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 2,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "infra_solr_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
             "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
-            "xasecure.audit.destination.hdfs": "true", 
+            "xasecure.audit.destination.hdfs": "true",
             "xasecure.audit.destination.solr": "false",
-            "xasecure.audit.provider.summary.enabled": "false", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
         },
         "ranger-tagsync-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
+            "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
             "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
-            "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks", 
+            "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks",
             "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
         },
@@ -296,186 +296,186 @@
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
         },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
             "ranger.is.solr.kerberised": "true",
-            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}", 
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
             "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
             "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/infra-solr",
             "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
             "ranger.jpa.jdbc.credential.alias": "rangeradmin",
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
-            "atlas.kafka.security.protocol": "SASL_PLAINTEXT", 
-            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}", 
-            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}", 
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule", 
-            "atlas.jaas.KafkaClient.option.serviceName": "kafka", 
-            "atlas.kafka.bootstrap.servers": "localhost:6667", 
-            "atlas.jaas.KafkaClient.option.useKeyTab": "true", 
-            "atlas.jaas.KafkaClient.option.storeKey": "true", 
-            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required", 
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+            "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
+            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
+            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "atlas.jaas.KafkaClient.option.serviceName": "kafka",
+            "atlas.kafka.bootstrap.servers": "localhost:6667",
+            "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+            "atlas.jaas.KafkaClient.option.storeKey": "true",
+            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
             "atlas.kafka.sasl.kerberos.service.name": "kafka"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
             "ranger_solr_replication_factor": "1",
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "kerberos-env": {
-            "kdc_hosts": "c6401.ambari.apache.org", 
-            "manage_auth_to_local": "true", 
-            "install_packages": "true", 
-            "realm": "EXAMPLE.COM", 
-            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
-            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}", 
-            "kdc_create_attributes": "", 
-            "admin_server_host": "c6401.ambari.apache.org", 
-            "group": "ambari-managed-principals", 
-            "password_length": "20", 
-            "ldap_url": "", 
-            "manage_identities": "true", 
-            "password_min_lowercase_letters": "1", 
-            "create_ambari_principal": "true", 
-            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}", 
-            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", 
-            "password_chat_timeout": "5", 
-            "kdc_type": "mit-kdc", 
-            "set_password_expiry": "false", 
-            "password_min_punctuation": "1", 
-            "container_dn": "", 
-            "case_insensitive_username_rules": "false", 
-            "password_min_whitespace": "0", 
-            "password_min_uppercase_letters": "1", 
+            "kdc_hosts": "c6401.ambari.apache.org",
+            "manage_auth_to_local": "true",
+            "install_packages": "true",
+            "realm": "EXAMPLE.COM",
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}",
+            "kdc_create_attributes": "",
+            "admin_server_host": "c6401.ambari.apache.org",
+            "group": "ambari-managed-principals",
+            "password_length": "20",
+            "ldap_url": "",
+            "manage_identities": "true",
+            "password_min_lowercase_letters": "1",
+            "create_ambari_principal": "true",
+            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+            "password_chat_timeout": "5",
+            "kdc_type": "mit-kdc",
+            "set_password_expiry": "false",
+            "password_min_punctuation": "1",
+            "container_dn": "",
+            "case_insensitive_username_rules": "false",
+            "password_min_whitespace": "0",
+            "password_min_uppercase_letters": "1",
             "password_min_digits": "1"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
         },
         "ranger-solr-configuration": {
@@ -484,261 +484,261 @@
             "ranger_audit_logs_merge_factor": "5"
         },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:1019", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.datanode.http.address": "0.0.0.0:1022", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+            "dfs.heartbeat.interval": "3",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:1019",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.datanode.http.address": "0.0.0.0:1022",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
             "ranger.tagsync.dest.ranger.ssl.config.filename": "{{stack_root}}/current/ranger-tagsync/conf/ranger-policymgr-ssl.xml",
             "ranger.tagsync.source.atlasrest.username": "",
             "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM", 
-            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
+            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "krb5-conf": {
-            "domains": "", 
-            "manage_krb5_conf": "true", 
-            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}", 
+            "domains": "",
+            "manage_krb5_conf": "true",
+            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}",
             "conf_dir": "/etc"
-        }, 
+        },
         "core-site": {
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "fs.trash.interval": "360", 
-            "ipc.server.tcpnodelay": "true", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
-            "ipc.client.idlethreshold": "8000", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "hadoop.security.authentication": "kerberos", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.HTTP.groups": "users", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "hadoop.security.authorization": "true", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org", 
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "fs.trash.interval": "360",
+            "ipc.server.tcpnodelay": "true",
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "ipc.client.idlethreshold": "8000",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "hadoop.security.authentication": "kerberos",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.HTTP.groups": "users",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "hadoop.security.authorization": "true",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
             "ipc.client.connection.maxidletime": "30000"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: consol

<TRUNCATED>
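The hadoop-env "content" template above fills its {{...}} placeholders from the sibling hadoop-env properties (namenode_heapsize, hadoop_heapsize, hdfs_log_dir_prefix, and so on). A minimal sketch of that substitution, assuming plain jinja2 rather than Ambari's own resource_management templating, and using only values that appear in the block above:

    from jinja2 import Template

    # Property values copied from the hadoop-env block above.
    hadoop_env = {
        "namenode_heapsize": "1024m",
        "hadoop_heapsize": "1024",
        "hdfs_log_dir_prefix": "/var/log/hadoop",
    }

    # Three representative lines from the "content" template.
    content = (
        'export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"\n'
        'export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"\n'
        'export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n'
    )

    print(Template(content).render(**hadoop_env))
    # export HADOOP_HEAPSIZE="1024"
    # export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms1024m"
    # export HADOOP_LOG_DIR=/var/log/hadoop/$USER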

[05/36] ambari git commit: AMBARI-21384. Message for deleted hosts is confusing (alexantonenko)

Posted by lp...@apache.org.
AMBARI-21384. Message for deleted hosts is confusing (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f17d317b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f17d317b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f17d317b

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: f17d317b725012ccc687ee070b1e1dbc604e8d12
Parents: 8e719f7
Author: Alex Antonenko <hi...@gmail.com>
Authored: Fri Jun 30 17:42:53 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Fri Jul 7 10:38:45 2017 +0300

----------------------------------------------------------------------
 .../app/controllers/main/host/bulk_operations_controller.js    | 6 +++---
 ambari-web/app/messages.js                                     | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f17d317b/ambari-web/app/controllers/main/host/bulk_operations_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/host/bulk_operations_controller.js b/ambari-web/app/controllers/main/host/bulk_operations_controller.js
index 92ae12e..b053fc3 100644
--- a/ambari-web/app/controllers/main/host/bulk_operations_controller.js
+++ b/ambari-web/app/controllers/main/host/bulk_operations_controller.js
@@ -444,7 +444,7 @@ App.BulkOperationsController = Em.Controller.extend({
         templateName: require('templates/main/host/delete_hosts_result_popup'),
         message: Em.I18n.t('hosts.bulkOperation.deleteHosts.dryRun.message').format(undeletableHosts.length),
         undeletableHosts: undeletableHosts,
-        deletedHosts: deletedHosts,
+        deletedHosts: deletedHosts.sortProperty('deleted.key'),
         onToggleHost: function (host) {
           host.contexts[0].toggleProperty('isCollapsed');
         }
@@ -823,7 +823,7 @@ App.BulkOperationsController = Em.Controller.extend({
         templateName: require('templates/main/host/delete_hosts_result_popup'),
         message: Em.I18n.t('hosts.bulkOperation.delete.component.dryRun.message').format(undeletableHosts.length),
         undeletableHosts: undeletableHosts,
-        deletedHosts: deletedHosts,
+        deletedHosts: deletedHosts.sortProperty('deleted.key'),
         onToggleHost: function (host) {
           host.contexts[0].toggleProperty('isCollapsed');
         }
@@ -1295,4 +1295,4 @@ App.BulkOperationsController = Em.Controller.extend({
     return hostNamesSkipped;
   }
 
-});
\ No newline at end of file
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/f17d317b/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index e88ec42..5e1d08f 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -2593,7 +2593,7 @@ Em.I18n.translations = {
   'hosts.bulkOperation.deleteHosts.confirmation.body.msg3': 'To completely delete the hosts, first stop ambari-agent on them.',
   'hosts.bulkOperation.deleteHosts.confirmation.body.msg4': 'If the hosts were hosting a Zookeeper Server, the Zookeeper Service should be restarted. Go to the <i>Services</i> page.',
   'hosts.bulkOperation.deleteHosts.result.header':'Delete Hosts',
-  'hosts.bulkOperation.deleteHosts.result.body': 'The following hosts were deleted successfully:',
+  'hosts.bulkOperation.deleteHosts.result.body': 'The following hosts and host components were deleted successfully:',
   'hosts.bulkOperation.confirmation.delete.component.minimum.body': 'At least {0} {1} should be installed in the cluster.',
   'hosts.bulkOperation.confirmation.delete.component.nothingToDo.body': '{0} are neither installed on selected hosts nor in the states that can be deleted.',
   'hosts.bulkOperation.confirmation.delete.component.skip':'The following hosts are skipped as {0} on them are not in the states that can be deleted.',


[35/36] ambari git commit: AMBARI-21471. ATS going down due to missing org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin (smohanty)

Posted by lp...@apache.org.
AMBARI-21471. ATS going down due to missing org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin (smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/853a5d4a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/853a5d4a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/853a5d4a

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 853a5d4a2eda1afb5ee4578cf99d0757abc5f95d
Parents: eb1adcb
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Thu Jul 13 22:35:28 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Thu Jul 13 22:38:40 2017 -0700

----------------------------------------------------------------------
 .../stacks/HDP/2.5/services/stack_advisor.py    |   5 +-
 .../src/main/resources/stacks/stack_advisor.py  |  19 ++-
 .../stacks/2.5/common/test_stack_advisor.py     | 150 +++++++++++--------
 3 files changed, 105 insertions(+), 69 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/853a5d4a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 3337e8e..4ca74ee 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -19,6 +19,7 @@ limitations under the License.
 
 import math
 
+
 from ambari_commons.str_utils import string_set_equals
 from resource_management.core.exceptions import Fail
 from resource_management.libraries.functions.get_bare_principal import get_bare_principal
@@ -774,9 +775,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
         self.checkAndStopLlapQueue(services, configurations, LLAP_QUEUE_NAME)
 
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
-    stack_root = "/usr/hdp"
-    if cluster_env and "stack_root" in cluster_env:
-      stack_root = cluster_env["stack_root"]
+    stack_root = self.getStackRoot(services)
 
     timeline_plugin_classes_values = []
     timeline_plugin_classpath_values = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/853a5d4a/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index 8e08d82..67f7fe0 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -25,6 +25,7 @@ import re
 import socket
 import string
 import traceback
+import json
 import sys
 import logging
 from math import ceil, floor
@@ -34,7 +35,6 @@ from urlparse import urlparse
 from resource_management.libraries.functions.data_structure_utils import get_from_dict
 from resource_management.core.exceptions import Fail
 
-
 class StackAdvisor(object):
   """
   Abstract class implemented by all stack advisors. Stack advisors advise on stack specific questions. 
@@ -2006,6 +2006,23 @@ class DefaultStackAdvisor(StackAdvisor):
 
     return mount_points
 
+  def getStackRoot(self, services):
+    """
+    Gets the stack root associated with the stack
+    :param services: the services structure containing the current configurations
+    :return: the stack root as specified in the config or /usr/hdp
+    """
+    cluster_env = self.getServicesSiteProperties(services, "cluster-env")
+    stack_root = "/usr/hdp"
+    if cluster_env and "stack_root" in cluster_env:
+      stack_root_as_str = cluster_env["stack_root"]
+      stack_roots = json.loads(stack_root_as_str)
+      stack_name = cluster_env["stack_name"]
+      if stack_name in stack_roots:
+        stack_root = stack_roots[stack_name]
+
+    return stack_root
+
   def isSecurityEnabled(self, services):
     """
     Determines if security is enabled by testing the value of cluster-env/security enabled.
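The getStackRoot() helper added above reads cluster-env/stack_root as a JSON string mapping stack name to install root and falls back to /usr/hdp when the property or the stack's key is absent; the test_stack_advisor.py changes below exercise it with stack_root set to {"HDP": "/usr/hdp"}. A minimal standalone sketch of that lookup, with a plain dict standing in for the advisor's getServicesSiteProperties() result:

    import json

    def stack_root_for(cluster_env, default="/usr/hdp"):
        # cluster-env/stack_root holds a JSON map such as '{"HDP": "/usr/hdp"}';
        # fall back to the default when the property or the stack name is missing.
        if not cluster_env or "stack_root" not in cluster_env:
            return default
        stack_roots = json.loads(cluster_env["stack_root"])
        return stack_roots.get(cluster_env.get("stack_name"), default)

    # Mirrors the cluster-env used by the updated test below:
    print(stack_root_for({"stack_root": '{"HDP":"/usr/hdp"}', "stack_name": "HDP"}))  # /usr/hdp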

http://git-wip-us.apache.org/repos/asf/ambari/blob/853a5d4a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index 50f527d..bf0cbec 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -825,70 +825,80 @@ class TestHDP25StackAdvisor(TestCase):
 
     services = {
       "services": [{
-        "StackServices": {
-          "service_name": "YARN",
-        },
-        "Versions": {
-          "stack_version": "2.5"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NODEMANAGER",
-              "hostnames": ["c6401.ambari.apache.org"]
-            }
-          }
-        ]
-      }, {
-        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
-        "StackServices": {
-          "service_name": "HIVE",
-          "service_version": "1.2.1.2.5",
-          "stack_name": "HDP",
-          "stack_version": "2.5"
-        },
-        "components": [
-          {
-            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
-            "StackServiceComponents": {
-              "advertise_version": "true",
-              "bulk_commands_display_name": "",
-              "bulk_commands_master_component_name": "",
-              "cardinality": "0-1",
-              "component_category": "MASTER",
-              "component_name": "HIVE_SERVER_INTERACTIVE",
-              "custom_commands": ["RESTART_LLAP"],
-              "decommission_allowed": "false",
-              "display_name": "HiveServer2 Interactive",
-              "has_bulk_commands_definition": "false",
-              "is_client": "false",
-              "is_master": "true",
-              "reassign_allowed": "false",
-              "recovery_enabled": "false",
-              "service_name": "HIVE",
-              "stack_name": "HDP",
-              "stack_version": "2.5",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-            "dependencies": []
-          },
-          {
-            "StackServiceComponents": {
-              "advertise_version": "true",
-              "cardinality": "1+",
-              "component_category": "SLAVE",
-              "component_name": "NODEMANAGER",
-              "display_name": "NodeManager",
-              "is_client": "false",
-              "is_master": "false",
-              "hostnames": [
-                "c6403.ambari.apache.org"
-              ]
-            },
-            "dependencies": []
-          },
-        ]
-      }
+                     "StackServices": {
+                       "service_name": "TEZ"
+                     }
+                   },
+                   {
+                     "StackServices": {
+                       "service_name": "SPARK"
+                     }
+                   },
+                   {
+                     "StackServices": {
+                       "service_name": "YARN",
+                     },
+                     "Versions": {
+                       "stack_version": "2.5"
+                     },
+                     "components": [
+                       {
+                         "StackServiceComponents": {
+                           "component_name": "NODEMANAGER",
+                           "hostnames": ["c6401.ambari.apache.org"]
+                         }
+                       }
+                     ]
+                   }, {
+                     "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+                     "StackServices": {
+                       "service_name": "HIVE",
+                       "service_version": "1.2.1.2.5",
+                       "stack_name": "HDP",
+                       "stack_version": "2.5"
+                     },
+                     "components": [
+                       {
+                         "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+                         "StackServiceComponents": {
+                           "advertise_version": "true",
+                           "bulk_commands_display_name": "",
+                           "bulk_commands_master_component_name": "",
+                           "cardinality": "0-1",
+                           "component_category": "MASTER",
+                           "component_name": "HIVE_SERVER_INTERACTIVE",
+                           "custom_commands": ["RESTART_LLAP"],
+                           "decommission_allowed": "false",
+                           "display_name": "HiveServer2 Interactive",
+                           "has_bulk_commands_definition": "false",
+                           "is_client": "false",
+                           "is_master": "true",
+                           "reassign_allowed": "false",
+                           "recovery_enabled": "false",
+                           "service_name": "HIVE",
+                           "stack_name": "HDP",
+                           "stack_version": "2.5",
+                           "hostnames": ["c6401.ambari.apache.org"]
+                         },
+                         "dependencies": []
+                       },
+                       {
+                         "StackServiceComponents": {
+                           "advertise_version": "true",
+                           "cardinality": "1+",
+                           "component_category": "SLAVE",
+                           "component_name": "NODEMANAGER",
+                           "display_name": "NodeManager",
+                           "is_client": "false",
+                           "is_master": "false",
+                           "hostnames": [
+                             "c6403.ambari.apache.org"
+                           ]
+                         },
+                         "dependencies": []
+                       },
+                     ]
+                   }
       ],
       "changed-configurations": [
         {
@@ -898,6 +908,12 @@ class TestHDP25StackAdvisor(TestCase):
         }
       ],
       "configurations": {
+        "cluster-env": {
+          "properties": {
+            "stack_root": "{\"HDP\":\"/usr/hdp\"}",
+            "stack_name": "HDP"
+          },
+        },
         "capacity-scheduler": {
           "properties": {
             "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
@@ -960,7 +976,8 @@ class TestHDP25StackAdvisor(TestCase):
             "tez.am.resource.memory.mb": "341"
           }
         }
-      }
+      },
+      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
     }
 
     clusterData = {
@@ -990,6 +1007,9 @@ class TestHDP25StackAdvisor(TestCase):
 
     self.assertEquals(configurations['hive-interactive-site']['properties']['hive.server2.tez.default.queues'], 'default')
     self.assertEquals(configurations['hive-interactive-site']['properties']['hive.llap.daemon.queue.name'], 'default')
+    self.assertEquals(configurations['yarn-site']['properties']['yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes'],
+                      'org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl,org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin')
+    self.assertEquals(configurations['yarn-site']['properties']['yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath'], '/usr/hdp/${hdp.version}/spark/hdpLib/*')
     self.assertTrue('hive-interactive-env' not in configurations)
     self.assertTrue('property_attributes' not in configurations)
 


[11/36] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by lp...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
index abe84ab..e5abe32 100644
--- a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
+++ b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
@@ -1,143 +1,143 @@
 {
     "localComponents": [
-        "NAMENODE", 
-        "SECONDARY_NAMENODE", 
-        "ZOOKEEPER_SERVER", 
-        "DATANODE", 
-        "HDFS_CLIENT", 
-        "ZOOKEEPER_CLIENT", 
-        "RANGER_USERSYNC", 
-        "RANGER_ADMIN", 
+        "NAMENODE",
+        "SECONDARY_NAMENODE",
+        "ZOOKEEPER_SERVER",
+        "DATANODE",
+        "HDFS_CLIENT",
+        "ZOOKEEPER_CLIENT",
+        "RANGER_USERSYNC",
+        "RANGER_ADMIN",
         "RANGER_TAGSYNC",
         "LOGSEARCH_SOLR",
         "LOGSEARCH_SOLR_CLIENT"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
+        },
         "ranger-tagsync-site": {},
         "ranger-tagsync-policymgr-ssl": {},
         "zoo.cfg": {},
         "hadoop-policy": {},
-        "hdfs-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
+        "hdfs-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
         "ranger-solr-configuration": {},
         "usersync-properties": {},
         "zookeeper-env": {},
         "infra-solr-env": {},
         "infra-solr-client-log4j": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "11-0", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER", 
-    "role": "RANGER_ADMIN", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 11, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "11-0",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER",
+    "role": "RANGER_ADMIN",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 11,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "c1", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 31, 
-    "roleParams": {}, 
+    },
+    "clusterName": "c1",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 31,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "core-site": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
         },
@@ -146,7 +146,7 @@
         },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
         },
@@ -165,116 +165,116 @@
         "cluster-env": {
             "tag": "version1"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
         "package_version": "2_6_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
         "current_version": "2.6.0.0-801",
         "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
         "agent_stack_retry_count": "5",
         "stack_version": "2.6",
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
-        "java_home": "/usr/jdk64/jdk1.7.0_45", 
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
         "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.6\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.6.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
         "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
-        "db_name": "ambari", 
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "jce_policy-8.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "db_name": "ambari",
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "jce_policy-8.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_admin.py", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_admin.py",
         "hooks_folder": "HDP/2.0.6/hooks",
         "version": "2.6.0.0-801",
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 0, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 0,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "infra_solr_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
-            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
             "xasecure.audit.destination.hdfs": "true",
-            "xasecure.audit.destination.solr": "false", 
+            "xasecure.audit.destination.solr": "false",
             "xasecure.audit.provider.summary.enabled": "false",
             "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
         },
         "ranger-tagsync-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
+            "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
             "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
-            "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks", 
+            "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks",
             "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
         },
@@ -287,143 +287,143 @@
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
         },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
             "ranger.admin.kerberos.cookie.domain": "",
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks", 
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
             "ranger.truststore.password": "changeit",
             "ranger.truststore.alias": "trustStoreAlias",
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
             "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/infra-solr",
-            "ranger.lookup.kerberos.principal": "", 
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
-            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.lookup.kerberos.principal": "",
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151",
             "ranger.service.https.attrib.keystore.credential.alias": "keyStoreCredentialAlias"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
             "atlas.kafka.bootstrap.servers": "localhost:6667"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
             "ranger_solr_replication_factor": "1",
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
         },
         "ranger-solr-configuration": {
@@ -432,248 +432,248 @@
             "ranger_audit_logs_merge_factor": "5"
         },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:50010", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.datanode.http.address": "0.0.0.0:50075", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:50010",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.heartbeat.interval": "3",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.datanode.http.address": "0.0.0.0:50075",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
             "ranger.tagsync.dest.ranger.ssl.config.filename": "{{stack_root}}/current/ranger-tagsync/conf/ranger-policymgr-ssl.xml",
             "ranger.tagsync.source.atlasrest.username": "",
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "", 
-            "ranger.tagsync.kerberos.keytab": "", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "",
+            "ranger.tagsync.kerberos.keytab": "",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "core-site": {
-            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "fs.trash.interval": "360", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "hadoop.security.authentication": "simple", 
-            "hadoop.proxyuser.root.groups": "*", 
-            "ipc.client.connection.maxidletime": "30000", 
-            "hadoop.security.key.provider.path": "", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.security.authorization": "false", 
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "ipc.server.tcpnodelay": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "DEFAULT", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "ipc.client.idlethreshold": "8000", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "fs.trash.interval": "360",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "hadoop.security.authentication": "simple",
+            "hadoop.proxyuser.root.groups": "*",
+            "ipc.client.connection.maxidletime": "30000",
+            "hadoop.security.key.provider.path": "",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.security.authorization": "false",
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "ipc.server.tcpnodelay": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "DEFAULT",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "ipc.client.idlethreshold": "8000",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
             "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
-        }, 
+        },
         "ssl-server": {
-            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", 
-            "ssl.server.keystore.keypassword": "bigdata", 
-            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", 
-            "ssl.server.keystore.password": "bigdata", 
-            "ssl.server.truststore.password": "bigdata", 
-            "ssl.server.truststore.type": "jks", 
-            "ssl.server.keystore.type": "jks", 
+            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
+            "ssl.server.keystore.keypassword": "bigdata",
+            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
+            "ssl.server.keystore.password": "bigdata",
+            "ssl.server.truststore.password": "bigdata",
+            "ssl.server.truststore.type": "jks",
+            "ssl.server.keystore.type": "jks",
             "ssl.server.truststore.reload.interval": "10000"
-        }, 
-        "ranger-site": {}, 
+        },
+        "ranger-site": {},
         "admin-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
 ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] 
 %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
-        }, 
+        },
         "tagsync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
-        }, 
+        },
         "ranger-hdfs-security": {
-            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
-            "ranger.plugin.hdfs.service.name": "{{repo_name}}", 
-            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
-            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", 
-            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", 
-            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", 
+            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+            "ranger.plugin.hdfs.service.name": "{{repo_name}}",
+            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
+            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
+            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
+            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
             "xasecure.add-hadoop-authorization": "true"
-        }, 
-        "usersync-properties": {}, 
+        },
+        "usersync-properties": {},
         "zookeeper-env": {
-            "zk_log_dir": "/var/log/zookeeper", 
-            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
-            "zk_server_heapsize": "1024m", 
-            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper",
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+            "zk_server_heapsize": "1024m",
+            "zk_pid_dir": "/var/run/zookeeper",
             "zk_user": "zookeeper"
         },
         "infra-solr-env": {
@@ -682,7 +682,7 @@
             "infra_solr_kerberos_name_rules": "DEFAULT",
             "infra_solr_user": "infra-solr",
             "infra_solr_maxmem": "1024",
-            "content": "#!/bin/bash\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# By default the script will use JAVA_HOME to determine which java\n# to use, but you can set a specific path for Solr to use without\n# affecting other Java applica
 tions on your server/workstation.\nSOLR_JAVA_HOME

<TRUNCATED>

[24/36] ambari git commit: AMBARI-21427. Assigning hosts concurrently to same config group may fail with "org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist". (stoader)

Posted by lp...@apache.org.
AMBARI-21427. Assigning hosts concurrently to same config group may fail with "org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist". (stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/639f4523
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/639f4523
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/639f4523

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 639f4523fdf49c8e0dddf79074cdb7eb4e43940c
Parents: 70cf77e
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Tue Jul 11 00:55:59 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Tue Jul 11 00:55:59 2017 +0300

----------------------------------------------------------------------
 .../ambari/server/topology/AmbariContext.java   | 81 +++++++++++++++-----
 1 file changed, 62 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/639f4523/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 106d7c8..dee0e6c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -30,6 +30,7 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
 
 import javax.annotation.Nullable;
 import javax.inject.Inject;
@@ -69,9 +70,11 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.utils.RetryHelper;
 import org.slf4j.Logger;
@@ -79,6 +82,8 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Striped;
+import com.google.inject.Provider;
 
 
 /**
@@ -99,6 +104,12 @@ public class AmbariContext {
   @Inject
   ConfigFactory configFactory;
 
+  /**
+   * Used for getting configuration property values from stack and services.
+   */
+  @Inject
+  private Provider<ConfigHelper> configHelper;
+
   private static AmbariManagementController controller;
   private static ClusterController clusterController;
   //todo: task id's.  Use existing mechanism for getting next task id sequence
@@ -112,6 +123,16 @@ public class AmbariContext {
 
   private final static Logger LOG = LoggerFactory.getLogger(AmbariContext.class);
 
+
+  /**
+   * When config groups are created using Blueprints these are created when
+   * hosts join a hostgroup and are added to the corresponding config group.
+   * Since hosts join in parallel there might be a race condition in creating
+   * the config group a host is to be added to. Thus we need to synchronize
+   * the creation of config groups with the same name.
+   */
+  private Striped<Lock> configGroupCreateLock = Striped.lazyWeakLock(1);
+
   public boolean isClusterKerberosEnabled(long clusterId) {
     Cluster cluster;
     try {
@@ -167,9 +188,10 @@ public class AmbariContext {
 
   public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType, String repoVersion) {
     Stack stack = topology.getBlueprint().getStack();
+    StackId stackId = new StackId(stack.getName(), stack.getVersion());
 
     createAmbariClusterResource(clusterName, stack.getName(), stack.getVersion(), securityType, repoVersion);
-    createAmbariServiceAndComponentResources(topology, clusterName);
+    createAmbariServiceAndComponentResources(topology, clusterName, stackId, repoVersion);
   }
 
   public void createAmbariClusterResource(String clusterName, String stackName, String stackVersion, SecurityType securityType, String repoVersion) {
@@ -196,7 +218,8 @@ public class AmbariContext {
     }
   }
 
-  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName) {
+  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName,
+      StackId stackId, String repositoryVersion) {
     Collection<String> services = topology.getBlueprint().getServices();
 
     try {
@@ -205,11 +228,13 @@ public class AmbariContext {
     } catch (AmbariException e) {
       throw new RuntimeException("Failed to persist service and component resources: " + e, e);
     }
-    Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
-    Set<ServiceComponentRequest> componentRequests = new HashSet<ServiceComponentRequest>();
+    Set<ServiceRequest> serviceRequests = new HashSet<>();
+    Set<ServiceComponentRequest> componentRequests = new HashSet<>();
     for (String service : services) {
       String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service);
-      serviceRequests.add(new ServiceRequest(clusterName, service, null, credentialStoreEnabled));
+      serviceRequests.add(new ServiceRequest(clusterName, service, stackId.getStackId(),
+          repositoryVersion, null, credentialStoreEnabled));
+
       for (String component : topology.getBlueprint().getComponents(service)) {
         String recoveryEnabled = topology.getBlueprint().getRecoveryEnabled(service, component);
         componentRequests.add(new ServiceComponentRequest(clusterName, service, component, null, recoveryEnabled));
@@ -223,14 +248,14 @@ public class AmbariContext {
     }
     // set all services state to INSTALLED->STARTED
     // this is required so the user can start failed services at the service level
-    Map<String, Object> installProps = new HashMap<String, Object>();
+    Map<String, Object> installProps = new HashMap<>();
     installProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "INSTALLED");
     installProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    Map<String, Object> startProps = new HashMap<String, Object>();
+    Map<String, Object> startProps = new HashMap<>();
     startProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "STARTED");
     startProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    Predicate predicate = new EqualsPredicate<String>(
-        ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
+    Predicate predicate = new EqualsPredicate<>(
+      ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
     try {
       getServiceResourceProvider().updateResources(
           new RequestImpl(null, Collections.singleton(installProps), null, null), predicate);
@@ -262,9 +287,9 @@ public class AmbariContext {
     }
     String clusterName = cluster.getClusterName();
 
-    Map<String, Object> properties = new HashMap<String, Object>();
+    Map<String, Object> properties = new HashMap<>();
     properties.put(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    properties.put(HostResourceProvider.HOST_NAME_PROPERTY_ID, hostName);
+    properties.put(HostResourceProvider.HOST_HOST_NAME_PROPERTY_ID, hostName);
     properties.put(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID, host.getRackInfo());
 
     try {
@@ -275,7 +300,7 @@ public class AmbariContext {
           hostName, e.toString()), e);
     }
 
-    final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
+    final Set<ServiceComponentHostRequest> requests = new HashSet<>();
 
     for (Map.Entry<String, Collection<String>> entry : components.entrySet()) {
       String service = entry.getKey();
@@ -328,11 +353,17 @@ public class AmbariContext {
   }
 
   public void registerHostWithConfigGroup(final String hostName, final ClusterTopology topology, final String groupName) {
+    String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
+
+    Lock configGroupLock = configGroupCreateLock.get(qualifiedGroupName);
+
     try {
+      configGroupLock.lock();
+
       boolean hostAdded = RetryHelper.executeWithRetry(new Callable<Boolean>() {
         @Override
         public Boolean call() throws Exception {
-          return addHostToExistingConfigGroups(hostName, topology, groupName);
+          return addHostToExistingConfigGroups(hostName, topology, qualifiedGroupName);
         }
       });
       if (!hostAdded) {
@@ -342,6 +373,9 @@ public class AmbariContext {
       LOG.error("Unable to register config group for host: ", e);
       throw new RuntimeException("Unable to register config group for host: " + hostName);
     }
+    finally {
+      configGroupLock.unlock();
+    }
   }
 
   public RequestStatusResponse installHost(String hostName, String clusterName, Collection<String> skipInstallForComponents, Collection<String> dontSkipInstallForComponents, boolean skipFailure) {
@@ -549,7 +583,7 @@ public class AmbariContext {
   /**
    * Add the new host to an existing config group.
    */
-  private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String groupName) {
+  private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String configGroupName) {
     boolean addedHost = false;
     Clusters clusters;
     Cluster cluster;
@@ -563,9 +597,8 @@ public class AmbariContext {
     // I don't know of a method to get config group by name
     //todo: add a method to get config group by name
     Map<Long, ConfigGroup> configGroups = cluster.getConfigGroups();
-    String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
     for (ConfigGroup group : configGroups.values()) {
-      if (group.getName().equals(qualifiedGroupName)) {
+      if (group.getName().equals(configGroupName)) {
         try {
           Host host = clusters.getHost(hostName);
           addedHost = true;
@@ -589,7 +622,7 @@ public class AmbariContext {
    * and the hosts associated with the host group are assigned to the config group.
    */
   private void createConfigGroupsAndRegisterHost(ClusterTopology topology, String groupName) throws AmbariException {
-    Map<String, Map<String, Config>> groupConfigs = new HashMap<String, Map<String, Config>>();
+    Map<String, Map<String, Config>> groupConfigs = new HashMap<>();
     Stack stack = topology.getBlueprint().getStack();
 
     // get the host-group config with cluster creation template overrides
@@ -608,7 +641,7 @@ public class AmbariContext {
       //todo: attributes
       Map<String, Config> serviceConfigs = groupConfigs.get(service);
       if (serviceConfigs == null) {
-        serviceConfigs = new HashMap<String, Config>();
+        serviceConfigs = new HashMap<>();
         groupConfigs.put(service, serviceConfigs);
       }
       serviceConfigs.put(type, config);
@@ -669,6 +702,16 @@ public class AmbariContext {
     return String.format("%s:%s", bpName, hostGroupName);
   }
 
+  /**
+   * Gets an instance of {@link ConfigHelper} for classes which are not
+   * dependency injected.
+   *
+   * @return a {@link ConfigHelper} instance.
+   */
+  public ConfigHelper getConfigHelper() {
+    return configHelper.get();
+  }
+
   private synchronized HostResourceProvider getHostResourceProvider() {
     if (hostResourceProvider == null) {
       hostResourceProvider = (HostResourceProvider)
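
Note on the locking added above: Guava's Striped hands out locks keyed by arbitrary objects, and lazyWeakLock creates them lazily and lets unused ones be garbage-collected, so equal keys (here, the qualified config-group name) always contend on the same Lock and concurrent host registrations for the same group are serialized. With a single stripe, as in the commit, every name happens to map to one shared lock, which is coarser but still correct. The following is a minimal, self-contained sketch of that pattern only -- it is not part of the commit, and the class, method and group names are invented for illustration.

import java.util.concurrent.locks.Lock;

import com.google.common.util.concurrent.Striped;

public class ConfigGroupLockSketch {

  // One Striped<Lock> instance shared by all callers; locks are created
  // lazily per key and weakly referenced, so idle keys do not leak memory.
  private final Striped<Lock> groupLocks = Striped.lazyWeakLock(1);

  // Runs the supplied action while holding the lock for the given group
  // name, so "check if the group exists, then create it or add the host"
  // executes at most once at a time per name.
  public void withGroupLock(String qualifiedGroupName, Runnable createOrAddHost) {
    Lock lock = groupLocks.get(qualifiedGroupName);
    lock.lock();
    try {
      createOrAddHost.run();
    } finally {
      lock.unlock();
    }
  }

  public static void main(String[] args) {
    ConfigGroupLockSketch sketch = new ConfigGroupLockSketch();
    // Hypothetical qualified group name in the "blueprint:hostgroup" form.
    sketch.withGroupLock("bp1:host_group_1",
        () -> System.out.println("create-or-join config group, serialized per name"));
  }
}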


[18/36] ambari git commit: AMBARI-21430. Allow Multiple Versions of Stack Tools to Co-Exist - fix illegal import

Posted by lp...@apache.org.
AMBARI-21430. Allow Multiple Versions of Stack Tools to Co-Exist - fix illegal import


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d0f7a515
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d0f7a515
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d0f7a515

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: d0f7a51537469740e5397486b1e2c19862c26c01
Parents: f33a250
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Sun Jul 9 12:15:28 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Sun Jul 9 12:16:54 2017 +0200

----------------------------------------------------------------------
 .../java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d0f7a515/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index fa3aea3..0656f68 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -29,7 +29,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.PropertyInfo;
-import org.apache.hadoop.metrics2.sink.relocated.commons.lang.StringUtils;
+import org.apache.commons.lang.StringUtils;
 
 import com.google.common.collect.Sets;
 import com.google.inject.Inject;
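
The removed import points into a package named "relocated", i.e. presumably a copy of commons-lang shaded into the hadoop metrics2 sink jar for its own internal use; the fix switches to the canonical org.apache.commons.lang artifact. A trivial, hypothetical snippet using the canonical class (not part of the commit; the class name below is invented):

import org.apache.commons.lang.StringUtils;

public class StringUtilsImportCheck {
  public static void main(String[] args) {
    // isBlank from the canonical commons-lang artifact treats null, ""
    // and whitespace-only strings uniformly.
    System.out.println(StringUtils.isBlank("   "));    // true
    System.out.println(StringUtils.isBlank("ambari")); // false
  }
}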


[06/36] ambari git commit: AMBARI-21409. Ambari agent not starting in latest PPC build (aonishuk)

Posted by lp...@apache.org.
AMBARI-21409. Ambari agent not starting in latest PPC build (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6832ed93
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6832ed93
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6832ed93

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 6832ed93a156b33178e62d6f8279faea8e414cba
Parents: 7029e7f
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Fri Jul 7 11:04:22 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Fri Jul 7 11:04:22 2017 +0300

----------------------------------------------------------------------
 .../src/main/python/ambari_commons/resources/os_family.json       | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6832ed93/ambari-common/src/main/python/ambari_commons/resources/os_family.json
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/ambari_commons/resources/os_family.json b/ambari-common/src/main/python/ambari_commons/resources/os_family.json
index b9cdbf6..5579378 100644
--- a/ambari-common/src/main/python/ambari_commons/resources/os_family.json
+++ b/ambari-common/src/main/python/ambari_commons/resources/os_family.json
@@ -29,7 +29,8 @@
           "centos-ppc"
         ],
         "versions": [
-          6
+          6,
+          7
         ]
       },
       "debian": {


[14/36] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by lp...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
index f959b1f..7f1e549 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
@@ -1,159 +1,159 @@
 {
     "localComponents": [
-        "SECONDARY_NAMENODE", 
-        "HDFS_CLIENT", 
-        "DATANODE", 
-        "NAMENODE", 
-        "RANGER_ADMIN", 
-        "RANGER_TAGSYNC", 
-        "RANGER_USERSYNC", 
-        "ZOOKEEPER_SERVER", 
-        "ZOOKEEPER_CLIENT", 
+        "SECONDARY_NAMENODE",
+        "HDFS_CLIENT",
+        "DATANODE",
+        "NAMENODE",
+        "RANGER_ADMIN",
+        "RANGER_TAGSYNC",
+        "RANGER_USERSYNC",
+        "ZOOKEEPER_SERVER",
+        "ZOOKEEPER_CLIENT",
         "KERBEROS_CLIENT",
         "LOGSEARCH_SOLR",
         "LOGSEARCH_SOLR_CLIENT"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
-        "kerberos-env": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
+        "kerberos-env": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
-        "ranger-tagsync-site": {}, 
-        "zoo.cfg": {}, 
-        "hadoop-policy": {}, 
-        "hdfs-log4j": {}, 
-        "krb5-conf": {}, 
+        },
+        "ranger-tagsync-site": {},
+        "zoo.cfg": {},
+        "hadoop-policy": {},
+        "hdfs-log4j": {},
+        "krb5-conf": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
-        "usersync-properties": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
+        "usersync-properties": {},
         "zookeeper-env": {},
         "infra-solr-env": {},
         "infra-solr-client-log4j": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "41-2", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER", 
-    "role": "RANGER_ADMIN", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 41, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "41-2",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER",
+    "role": "RANGER_ADMIN",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 41,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "test_Cluster01", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 186, 
-    "roleParams": {}, 
+    },
+    "clusterName": "test_Cluster01",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 186,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1467016680635"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1467016680511"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1467016680537"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "kerberos-env": {
             "tag": "version1467016537243"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1467016680401"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1467016680586"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "krb5-conf": {
             "tag": "version1467016537243"
-        }, 
+        },
         "core-site": {
             "tag": "version1467016680612"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1467016680446"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1467016680492"
         },
@@ -166,550 +166,550 @@
         "cluster-env": {
             "tag": "version1467016680567"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
-        "package_version": "2_5_0_0_*", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
+        "package_version": "2_5_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
-        "current_version": "2.5.0.0-801", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
-        "agent_stack_retry_count": "5", 
-        "stack_version": "2.5", 
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
+        "current_version": "2.5.0.0-801",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+        "agent_stack_retry_count": "5",
+        "stack_version": "2.5",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
         "java_home": "/usr/jdk64/jdk1.7.0_45",
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
-        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
-        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]", 
-        "db_name": "ambari", 
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "jce_policy-8.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
+        "db_name": "ambari",
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "jce_policy-8.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_admin.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "version": "2.5.0.0-801", 
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_admin.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "version": "2.5.0.0-801",
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 2, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 2,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "infra_solr_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
             "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
-            "xasecure.audit.destination.hdfs": "true", 
+            "xasecure.audit.destination.hdfs": "true",
             "xasecure.audit.destination.solr": "false",
-            "xasecure.audit.provider.summary.enabled": "false", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
             "ranger.is.solr.kerberised": "true",
-            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}", 
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
             "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
-            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr", 
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
+            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr",
             "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
             "ranger.jpa.jdbc.credential.alias": "rangeradmin",
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
-            "atlas.kafka.security.protocol": "SASL_PLAINTEXT", 
-            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}", 
-            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}", 
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule", 
-            "atlas.jaas.KafkaClient.option.serviceName": "kafka", 
-            "atlas.kafka.bootstrap.servers": "localhost:6667", 
-            "atlas.jaas.KafkaClient.option.useKeyTab": "true", 
-            "atlas.jaas.KafkaClient.option.storeKey": "true", 
-            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required", 
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+            "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
+            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
+            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "atlas.jaas.KafkaClient.option.serviceName": "kafka",
+            "atlas.kafka.bootstrap.servers": "localhost:6667",
+            "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+            "atlas.jaas.KafkaClient.option.storeKey": "true",
+            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
             "atlas.kafka.sasl.kerberos.service.name": "kafka"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
             "ranger_solr_replication_factor": "1",
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "kerberos-env": {
-            "kdc_hosts": "c6401.ambari.apache.org", 
-            "manage_auth_to_local": "true", 
-            "install_packages": "true", 
-            "realm": "EXAMPLE.COM", 
-            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
-            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}", 
-            "kdc_create_attributes": "", 
-            "admin_server_host": "c6401.ambari.apache.org", 
-            "group": "ambari-managed-principals", 
-            "password_length": "20", 
-            "ldap_url": "", 
-            "manage_identities": "true", 
-            "password_min_lowercase_letters": "1", 
-            "create_ambari_principal": "true", 
-            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}", 
-            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", 
-            "password_chat_timeout": "5", 
-            "kdc_type": "mit-kdc", 
-            "set_password_expiry": "false", 
-            "password_min_punctuation": "1", 
-            "container_dn": "", 
-            "case_insensitive_username_rules": "false", 
-            "password_min_whitespace": "0", 
-            "password_min_uppercase_letters": "1", 
+            "kdc_hosts": "c6401.ambari.apache.org",
+            "manage_auth_to_local": "true",
+            "install_packages": "true",
+            "realm": "EXAMPLE.COM",
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}",
+            "kdc_create_attributes": "",
+            "admin_server_host": "c6401.ambari.apache.org",
+            "group": "ambari-managed-principals",
+            "password_length": "20",
+            "ldap_url": "",
+            "manage_identities": "true",
+            "password_min_lowercase_letters": "1",
+            "create_ambari_principal": "true",
+            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+            "password_chat_timeout": "5",
+            "kdc_type": "mit-kdc",
+            "set_password_expiry": "false",
+            "password_min_punctuation": "1",
+            "container_dn": "",
+            "case_insensitive_username_rules": "false",
+            "password_min_whitespace": "0",
+            "password_min_uppercase_letters": "1",
             "password_min_digits": "1"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
-        }, 
+        },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:1019", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.datanode.http.address": "0.0.0.0:1022", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+            "dfs.heartbeat.interval": "3",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:1019",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.datanode.http.address": "0.0.0.0:1022",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
-            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
-            "ranger.tagsync.source.atlasrest.username": "", 
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM", 
-            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.source.atlasrest.username": "",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
+            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "krb5-conf": {
-            "domains": "", 
-            "manage_krb5_conf": "true", 
-            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}", 
+            "domains": "",
+            "manage_krb5_conf": "true",
+            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}",
             "conf_dir": "/etc"
-        }, 
+        },
         "core-site": {
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "fs.trash.interval": "360", 
-            "ipc.server.tcpnodelay": "true", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
-            "ipc.client.idlethreshold": "8000", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "hadoop.security.authentication": "kerberos", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.HTTP.groups": "users", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "hadoop.security.authorization": "true", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org", 
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "fs.trash.interval": "360",
+            "ipc.server.tcpnodelay": "true",
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "ipc.client.idlethreshold": "8000",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "hadoop.security.authentication": "kerberos",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.HTTP.groups": "users",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "hadoop.security.authorization": "true",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
             "ipc.client.connection.maxidletime": "30000"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Softwa

<TRUNCATED>

[02/36] ambari git commit: AMBARI-21413 Move the Log Search ZK config root to the connect string (mgergely)

Posted by lp...@apache.org.
AMBARI-21413 Move the Log Search ZK config root to the connect string (mgergely)

Change-Id: Ia50439cf278556b5bf862c996644f60f3a826b32


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8e719f79
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8e719f79
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8e719f79

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 8e719f79402c10d529d2006702148acb085bccfe
Parents: 4256067
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Fri Jul 7 01:54:18 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Fri Jul 7 01:54:18 2017 +0200

----------------------------------------------------------------------
 .../config/zookeeper/LogSearchConfigZK.java     | 39 ++++++++++----------
 1 file changed, 19 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8e719f79/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
index 6d36203..fdd8ed6 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
@@ -97,7 +97,6 @@ public class LogSearchConfigZK implements LogSearchConfig {
   private static final String ZK_ROOT_NODE_PROPERTY = "logsearch.config.zk_root";
 
   private Map<String, String> properties;
-  private String root;
   private CuratorFramework client;
   private TreeCache cache;
   private Gson gson;
@@ -106,29 +105,29 @@ public class LogSearchConfigZK implements LogSearchConfig {
   public void init(Component component, Map<String, String> properties, String clusterName) throws Exception {
     this.properties = properties;
     
-    LOG.info("Connecting to ZooKeeper at " + properties.get(ZK_CONNECT_STRING_PROPERTY));
+    String root = MapUtils.getString(properties, ZK_ROOT_NODE_PROPERTY, DEFAULT_ZK_ROOT);
+    LOG.info("Connecting to ZooKeeper at " + properties.get(ZK_CONNECT_STRING_PROPERTY) + root);
     client = CuratorFrameworkFactory.builder()
-        .connectString(properties.get(ZK_CONNECT_STRING_PROPERTY))
+        .connectString(properties.get(ZK_CONNECT_STRING_PROPERTY) + root)
         .retryPolicy(new ExponentialBackoffRetry(1000, 3))
         .connectionTimeoutMs(CONNECTION_TIMEOUT)
         .sessionTimeoutMs(SESSION_TIMEOUT)
         .build();
     client.start();
 
-    root = MapUtils.getString(properties, ZK_ROOT_NODE_PROPERTY, DEFAULT_ZK_ROOT);
 
     if (component == Component.SERVER) {
-      if (client.checkExists().forPath(root) == null) {
-        client.create().creatingParentContainersIfNeeded().forPath(root);
+      if (client.checkExists().forPath("/") == null) {
+        client.create().creatingParentContainersIfNeeded().forPath("/");
       }
-      cache = new TreeCache(client, root);
+      cache = new TreeCache(client, "/");
       cache.start();
     } else {
-      while (client.checkExists().forPath(root) == null) {
+      while (client.checkExists().forPath("/") == null) {
         LOG.info("Root node is not present yet, going to sleep for " + WAIT_FOR_ROOT_SLEEP_SECONDS + " seconds");
         Thread.sleep(WAIT_FOR_ROOT_SLEEP_SECONDS * 1000);
       }
-      cache = new TreeCache(client, String.format("%s/%s", root, clusterName));
+      cache = new TreeCache(client, String.format("/%s", clusterName));
     }
     
     gson = new GsonBuilder().setDateFormat(DATE_FORMAT).create();
@@ -136,13 +135,13 @@ public class LogSearchConfigZK implements LogSearchConfig {
 
   @Override
   public boolean inputConfigExists(String clusterName, String serviceName) throws Exception {
-    String nodePath = root + "/" + clusterName + "/input/" + serviceName;
+    String nodePath = String.format("/%s/input/%s", clusterName, serviceName);
     return cache.getCurrentData(nodePath) != null;
   }
 
   @Override
   public void createInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {
-    String nodePath = String.format("%s/%s/input/%s", root, clusterName, serviceName);
+    String nodePath = String.format("/%s/input/%s", clusterName, serviceName);
     try {
       client.create().creatingParentContainersIfNeeded().withACL(getAcls()).forPath(nodePath, inputConfig.getBytes());
       LOG.info("Uploaded input config for the service " + serviceName + " for cluster " + clusterName);
@@ -153,7 +152,7 @@ public class LogSearchConfigZK implements LogSearchConfig {
 
   @Override
   public void setInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {
-    String nodePath = String.format("%s/%s/input/%s", root, clusterName, serviceName);
+    String nodePath = String.format("/%s/input/%s", clusterName, serviceName);
     client.setData().forPath(nodePath, inputConfig.getBytes());
     LOG.info("Set input config for the service " + serviceName + " for cluster " + clusterName);
   }
@@ -182,7 +181,7 @@ public class LogSearchConfigZK implements LogSearchConfig {
         String nodeData = new String(event.getData().getData());
         Type eventType = event.getType();
         
-        String configPathStab = String.format("%s/%s/", root, clusterName);
+        String configPathStab = String.format("/%s/", clusterName);
         
         if (event.getData().getPath().startsWith(configPathStab + "input/")) {
           handleInputConfigChange(eventType, nodeName, nodeData);
@@ -267,7 +266,7 @@ public class LogSearchConfigZK implements LogSearchConfig {
   }
 
   private void createGlobalConfigNode(JsonArray globalConfigNode, String clusterName) {
-    String globalConfigNodePath = String.format("%s/%s/global", root, clusterName);
+    String globalConfigNodePath = String.format("/%s/global", clusterName);
     String data = InputConfigGson.gson.toJson(globalConfigNode);
     
     try {
@@ -283,14 +282,14 @@ public class LogSearchConfigZK implements LogSearchConfig {
 
   @Override
   public List<String> getServices(String clusterName) {
-    String parentPath = String.format("%s/%s/input", root, clusterName);
+    String parentPath = String.format("/%s/input", clusterName);
     Map<String, ChildData> serviceNodes = cache.getCurrentChildren(parentPath);
     return new ArrayList<String>(serviceNodes.keySet());
   }
 
   @Override
   public String getGlobalConfigs(String clusterName) {
-    String globalConfigNodePath = String.format("%s/%s/global", root, clusterName);
+    String globalConfigNodePath = String.format("/%s/global", clusterName);
     return new String(cache.getCurrentData(globalConfigNodePath).getData());
   }
 
@@ -300,13 +299,13 @@ public class LogSearchConfigZK implements LogSearchConfig {
     JsonArray globalConfigs = (JsonArray) new JsonParser().parse(globalConfigData);
     InputAdapter.setGlobalConfigs(globalConfigs);
     
-    ChildData childData = cache.getCurrentData(String.format("%s/%s/input/%s", root, clusterName, serviceName));
+    ChildData childData = cache.getCurrentData(String.format("/%s/input/%s", clusterName, serviceName));
     return childData == null ? null : InputConfigGson.gson.fromJson(new String(childData.getData()), InputConfigImpl.class);
   }
 
   @Override
   public void createLogLevelFilter(String clusterName, String logId, LogLevelFilter filter) throws Exception {
-    String nodePath = String.format("%s/%s/loglevelfilter/%s", root, clusterName, logId);
+    String nodePath = String.format("/%s/loglevelfilter/%s", clusterName, logId);
     String logLevelFilterJson = gson.toJson(filter);
     try {
       client.create().creatingParentContainersIfNeeded().withACL(getAcls()).forPath(nodePath, logLevelFilterJson.getBytes());
@@ -319,7 +318,7 @@ public class LogSearchConfigZK implements LogSearchConfig {
   @Override
   public void setLogLevelFilters(String clusterName, LogLevelFilterMap filters) throws Exception {
     for (Map.Entry<String, LogLevelFilter> e : filters.getFilter().entrySet()) {
-      String nodePath = String.format("%s/%s/loglevelfilter/%s", root, clusterName, e.getKey());
+      String nodePath = String.format("/%s/loglevelfilter/%s", clusterName, e.getKey());
       String logLevelFilterJson = gson.toJson(e.getValue());
       String currentLogLevelFilterJson = new String(cache.getCurrentData(nodePath).getData());
       if (!logLevelFilterJson.equals(currentLogLevelFilterJson)) {
@@ -331,7 +330,7 @@ public class LogSearchConfigZK implements LogSearchConfig {
 
   @Override
   public LogLevelFilterMap getLogLevelFilters(String clusterName) {
-    String parentPath = String.format("%s/%s/loglevelfilter", root, clusterName);
+    String parentPath = String.format("/%s/loglevelfilter", clusterName);
     Map<String, ChildData> logLevelFilterNodes = cache.getCurrentChildren(parentPath);
     TreeMap<String, LogLevelFilter> filters = new TreeMap<>();
     for (Map.Entry<String, ChildData> e : logLevelFilterNodes.entrySet()) {

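The hunk above drops the configurable "root" prefix from every ZooKeeper path and keeps only the cluster-relative part. Below is a minimal sketch of how such relative paths can still land under a per-deployment root, assuming the root is applied once when the CuratorFramework client is built (for example as a Curator namespace; this is an assumption about wiring outside this hunk, not the actual LogSearchConfigZK setup):

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class NamespacedZkSketch {
  public static void main(String[] args) throws Exception {
    CuratorFramework client = CuratorFrameworkFactory.builder()
        .connectString("localhost:2181")
        .namespace("logsearch")                        // plays the role of the old "root" prefix
        .retryPolicy(new ExponentialBackoffRetry(1000, 3))
        .build();
    client.start();

    // same relative path format as in the hunk above
    String nodePath = String.format("/%s/input/%s", "cl1", "zookeeper");

    // creates /logsearch/cl1/input/zookeeper on the ZooKeeper side
    client.create().creatingParentContainersIfNeeded().forPath(nodePath, "{}".getBytes());
  }
}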

[09/36] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by lp...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index 282b542..2f3794d 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -102,8 +102,12 @@ class RMFTestCase(TestCase):
     else:
       raise RuntimeError("Please specify either config_file_path or config_dict parameter")
 
-    self.config_dict["configurations"]["cluster-env"]["stack_tools"] = RMFTestCase.get_stack_tools()
-    self.config_dict["configurations"]["cluster-env"]["stack_features"] = RMFTestCase.get_stack_features()
+    # add the stack tools & features from the stack if the test case's JSON file didn't have them
+    if "stack_tools" not in self.config_dict["configurations"]["cluster-env"]:
+      self.config_dict["configurations"]["cluster-env"]["stack_tools"] = RMFTestCase.get_stack_tools()
+
+    if "stack_features" not in self.config_dict["configurations"]["cluster-env"]:
+      self.config_dict["configurations"]["cluster-env"]["stack_features"] = RMFTestCase.get_stack_features()
 
     if config_overrides:
       for key, value in config_overrides.iteritems():


[22/36] ambari git commit: AMBARI-21423 Add REST end point for the documentation of the Log Feeder shipper properties (mgergely)

Posted by lp...@apache.org.
AMBARI-21423 Add REST end point for the documentation of the Log Feeder shipper properties (mgergely)

Change-Id: If6d1b66c3a1f74b118ae60a7edc26624d49fb7e6


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/15dd999f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/15dd999f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/15dd999f

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 15dd999fff99fb80bc65ddfc94513e890a6efdef
Parents: c088289
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Mon Jul 10 14:51:23 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Mon Jul 10 14:51:23 2017 +0200

----------------------------------------------------------------------
 .../api/ShipperConfigElementDescription.java    |  59 +++++++++++
 .../api/ShipperConfigTypeDescription.java       |  44 ++++++++
 .../model/inputconfig/impl/ConditionsImpl.java  |  13 +++
 .../model/inputconfig/impl/FieldsImpl.java      |  14 +++
 .../inputconfig/impl/FilterDescriptorImpl.java  |  51 ++++++++++
 .../impl/FilterGrokDescriptorImpl.java          |  24 +++++
 .../impl/FilterKeyValueDescriptorImpl.java      |  28 +++++
 .../model/inputconfig/impl/InputConfigImpl.java |  18 ++++
 .../inputconfig/impl/InputDescriptorImpl.java   | 101 +++++++++++++++++++
 .../impl/InputFileBaseDescriptorImpl.java       |  27 +++++
 .../impl/InputS3FileDescriptorImpl.java         |  16 +++
 .../impl/MapAnonymizeDescriptorImpl.java        |  21 +++-
 .../inputconfig/impl/MapDateDescriptorImpl.java |  20 +++-
 .../impl/MapFieldCopyDescriptorImpl.java        |  14 ++-
 .../impl/MapFieldDescriptorImpl.java            |  33 ++++++
 .../impl/MapFieldNameDescriptorImpl.java        |  14 ++-
 .../impl/MapFieldValueDescriptorImpl.java       |  20 +++-
 .../inputconfig/impl/PostMapValuesAdapter.java  |   2 +-
 .../ambari-logsearch-logfeeder/docs/filter.md   |   4 +-
 .../ambari-logsearch-logfeeder/docs/input.md    |  10 +-
 .../docs/postMapValues.md                       |   2 +-
 .../ambari/logfeeder/filter/FilterJSONTest.java |  12 ++-
 .../common/ShipperConfigDescriptionStorage.java |  67 ++++++++++++
 .../ambari/logsearch/doc/DocConstants.java      |   1 +
 .../ambari/logsearch/manager/InfoManager.java   |   9 ++
 .../response/ShipperConfigDescriptionData.java  |  52 ++++++++++
 .../ambari/logsearch/rest/InfoResource.java     |  10 ++
 27 files changed, 667 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigElementDescription.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigElementDescription.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigElementDescription.java
new file mode 100644
index 0000000..d65bf8e
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigElementDescription.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.config.api;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Marker for the shipper configuration properties.
+ * Can be used to generate documentation about the shipper configs.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.FIELD})
+public @interface ShipperConfigElementDescription {
+
+  /**
+   * The path of the json element.
+   */
+  String path();
+
+  /**
+   * The type of the json element.
+   */
+  String type();
+
+  /**
+   * Describe what the json element is used for.
+   */
+  String description();
+
+  /**
+   * An example value for the element, if applicable.
+   */
+  String[] examples() default {};
+
+  /**
+   * Default value of the json element, if applicable.
+   */
+  String defaultValue() default "";
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigTypeDescription.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigTypeDescription.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigTypeDescription.java
new file mode 100644
index 0000000..1c112d8
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigTypeDescription.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.config.api;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Marker for the shipper configuration types.
+ * Can be used to generate documentation about the shipper configs.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.TYPE})
+public @interface ShipperConfigTypeDescription {
+
+  /**
+   * The name of the element type.
+   */
+  String name();
+
+  /**
+   * The description of the json element.
+   */
+  String description();
+
+}

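Both annotations are retained at runtime and target types and fields, so a documentation generator can collect them reflectively from the descriptor classes. The actual generator added by this commit lives in ShipperConfigDescriptionStorage (listed above); the class below is only a hypothetical sketch of the idea:

import java.lang.reflect.Field;

import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;

public class ShipperConfigDocSketch {

  // Prints the documentation carried by one annotated descriptor class,
  // e.g. ShipperConfigDocSketch.print(ConditionsImpl.class).
  // Only the class's own fields are walked here; a real generator would also
  // walk superclasses (e.g. FilterDescriptorImpl fields of FilterGrokDescriptorImpl).
  public static void print(Class<?> descriptorClass) {
    ShipperConfigTypeDescription type = descriptorClass.getAnnotation(ShipperConfigTypeDescription.class);
    if (type != null) {
      System.out.println(type.name() + ": " + type.description());
    }
    for (Field field : descriptorClass.getDeclaredFields()) {
      ShipperConfigElementDescription element = field.getAnnotation(ShipperConfigElementDescription.class);
      if (element == null) {
        continue;
      }
      System.out.println(element.path() + " (" + element.type() + "): " + element.description());
      if (element.examples().length > 0) {
        System.out.println("  examples: " + String.join(", ", element.examples()));
      }
      if (!element.defaultValue().isEmpty()) {
        System.out.println("  default: " + element.defaultValue());
      }
    }
  }
}
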
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
index 8bbff8f..2ba472c 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
@@ -19,11 +19,24 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.Conditions;
 
 import com.google.gson.annotations.Expose;
 
+@ShipperConfigTypeDescription(
+  name = "Conditions",
+  description = "Describes the conditions that should be met in order to match a filter to an input element.\n" +
+                "\n" +
+                "It has the following attributes:"
+)
 public class ConditionsImpl implements Conditions {
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/conditions/fields",
+    type = "json object",
+    description = "The fields in the input element of which's value should be met."
+  )
   @Expose
   private FieldsImpl fields;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
index 68cd0e2..32a0348 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
@@ -21,11 +21,25 @@ package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
 import java.util.Set;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.Fields;
 
 import com.google.gson.annotations.Expose;
 
+@ShipperConfigTypeDescription(
+    name = "Fields",
+    description = "Describes a the fields which's value should be met in order to match a filter to an input element.\n" +
+                  "\n" +
+                  "It has the following attributes:"
+  )
 public class FieldsImpl implements Fields {
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/conditions/fields/type",
+    type = "list of strings",
+    description = "The acceptable values for the type field in the input element.",
+    examples = {"ambari_server", "\"spark_jobhistory_server\", \"spark_thriftserver\", \"livy_server\""}
+  )
   @Expose
   private Set<String> type;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
index 4e11715..eb9d38c 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
@@ -22,35 +22,86 @@ package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
+@ShipperConfigTypeDescription(
+  name = "Filter",
+  description = "The filter element in the [input configuration](inputConfig.md) contains a list of filter descriptions, each describing one filter applied on an input.\n" +
+                "\n" +
+                "The general elements in the json are the following:"
+)
 public abstract class FilterDescriptorImpl implements FilterDescriptor {
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/filter",
+    type = "string",
+    description = "The type of the filter.",
+    examples = {"grok", "keyvalue", "json"}
+  )
   @Expose
   private String filter;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/conditions",
+    type = "json object",
+    description = "The conditions of which input to filter."
+  )
   @Expose
   private ConditionsImpl conditions;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/sort_order",
+    type = "integer",
+    description = "Describes the order in which the filters should be applied.",
+    examples = {"1", "3"}
+  )
   @Expose
   @SerializedName("sort_order")
   private Integer sortOrder;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/source_field",
+    type = "integer",
+    description = "The source of the filter, must be set for keyvalue filters.",
+    examples = {"field_further_to_filter"},
+    defaultValue = "log_message"
+  )
   @Expose
   @SerializedName("source_field")
   private String sourceField;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/remove_source_field",
+    type = "boolean",
+    description = "Remove the source field after the filter is applied.",
+    examples = {"true", "false"},
+    defaultValue = "false"
+  )
   @Expose
   @SerializedName("remove_source_field")
   private Boolean removeSourceField;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values",
+    type = "dictionary string to list of json objects",
+    description = "Mappings done after the filtering provided it's result."
+  )
   @Expose
   @SerializedName("post_map_values")
   private Map<String, List<PostMapValuesImpl>> postMapValues;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/is_enabled",
+    type = "boolean",
+    description = "A flag to show if the filter should be used.",
+    examples = {"true", "false"},
+    defaultValue = "true"
+  )
   @Expose
   @SerializedName("is_enabled")
   private Boolean isEnabled;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
index 995f76b..e140df0 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
@@ -19,20 +19,44 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterGrokDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
+@ShipperConfigTypeDescription(
+  name = "Grok Filter",
+  description = "Grok filters have the following additional parameters:"
+)
 public class FilterGrokDescriptorImpl extends FilterDescriptorImpl implements FilterGrokDescriptor {
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/log4j_format",
+    type = "string",
+    description = "The log4j pattern of the log, not used, it is only there for documentation.",
+    examples = {"%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n"}
+  )
   @Expose
   @SerializedName("log4j_format")
   private String log4jFormat;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/multiline_pattern",
+    type = "string",
+    description = "The grok pattern that shows that the line is not a log line on it's own but the part of a multi line entry.",
+    examples = {"^(%{TIMESTAMP_ISO8601:logtime})"}
+  )
   @Expose
   @SerializedName("multiline_pattern")
   private String multilinePattern;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/message_pattern",
+    type = "string",
+    description = "The grok pattern to use to parse the log entry.",
+    examples = {"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}-%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\@%{INT:line_number}\\]%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}"}
+  )
   @Expose
   @SerializedName("message_pattern")
   private String messagePattern;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
index 8e89990..1c782c5 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
@@ -19,20 +19,48 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterKeyValueDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
+@ShipperConfigTypeDescription(
+    name = "Key-value Filter",
+    description = "value_borders is only used if it is specified, and value_split is not.\n" +
+                  "\n" +
+                  "Key-value filters have the following additional parameters:"
+)
 public class FilterKeyValueDescriptorImpl extends FilterDescriptorImpl implements FilterKeyValueDescriptor {
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/field_split",
+    type = "string",
+    description = "The string that splits the key-value pairs.",
+    examples = {" ", ","},
+    defaultValue = "\\t"
+  )
   @Expose
   @SerializedName("field_split")
   private String fieldSplit;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/value_split",
+    type = "string",
+    description = "The string that separates keys from values.",
+    examples = {":", "->"},
+    defaultValue = "="
+  )
   @Expose
   @SerializedName("value_split")
   private String valueSplit;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/value_borders",
+    type = "string",
+    description = "The borders around the value, must be 2 characters long, first before it, second after it.",
+    examples = {"()", "[]", "{}"}
+  )
   @Expose
   @SerializedName("value_borders")
   private String valueBorders;

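To make the field_split / value_split semantics above concrete, here is a small illustrative sketch (simplified: it ignores value_borders and is not the actual LogFeeder key-value filter code):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Pattern;

public class KeyValueSplitSketch {

  // Splits "text" into pairs on fieldSplit, then each pair into key and value on valueSplit.
  public static Map<String, String> parse(String text, String fieldSplit, String valueSplit) {
    Map<String, String> result = new LinkedHashMap<>();
    for (String pair : text.split(Pattern.quote(fieldSplit))) {
      int idx = pair.indexOf(valueSplit);
      if (idx > 0) {
        result.put(pair.substring(0, idx), pair.substring(idx + valueSplit.length()));
      }
    }
    return result;
  }

  public static void main(String[] args) {
    // with field_split "," and value_split "=" this prints {user=admin, action=login}
    System.out.println(parse("user=admin,action=login", ",", "="));
  }
}
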
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
index a4eba8e..6ce634f 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
@@ -21,16 +21,34 @@ package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
 import java.util.List;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
 
 import com.google.gson.annotations.Expose;
 
+@ShipperConfigTypeDescription(
+  name = "Input Config",
+  description = "The input configurations are stored in json files. Each of them are describing the processing of the log files of a service.\n" +
+                "\n" +
+                "The json contains two elements:"
+)
 public class InputConfigImpl implements InputConfig {
+  @ShipperConfigElementDescription(
+    path = "/input",
+    type = "list of json objects",
+    description = "A list of input descriptions"
+  )
   @Expose
   private List<InputDescriptorImpl> input;
 
+  @ShipperConfigElementDescription(
+    path = "/filter",
+    type = "list of json objects",
+    description = "A list of filter descriptions"
+  )
   @Expose
   private List<FilterDescriptorImpl> filter;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
index 54b4b9b..cec16c8 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
@@ -21,59 +21,160 @@ package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
+@ShipperConfigTypeDescription(
+  name = "Input",
+  description = "The input element in the input configuration contains a list of input descriptions, each describing one source of input.\n" +
+                "\n" +
+                "The general elements in the json are the following:"
+)
 public abstract class InputDescriptorImpl implements InputDescriptor {
+  @ShipperConfigElementDescription(
+    path = "/input/[]/type",
+    type = "string",
+    description = "The log id for this source.",
+    examples = {"zookeeper", "ambari_server"}
+  )
   @Expose
   private String type;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/rowtype",
+    type = "string",
+    description = "The type of the row.",
+    examples = {"service", "audit"}
+  )
   @Expose
   private String rowtype;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/path",
+    type = "string",
+    description = "The path of the source, may contain '*' characters too.",
+    examples = {"/var/log/ambari-logsearch-logfeeder/logsearch-logfeeder.json", "/var/log/zookeeper/zookeeper*.log"}
+  )
   @Expose
   private String path;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/add_fields",
+    type = "dictionary",
+    description = "The element contains field_name: field_value pairs which will be added to each rows data.",
+    examples = {"\"cluster\":\"cluster_name\""}
+  )
   @Expose
   @SerializedName("add_fields")
   private Map<String, String> addFields;
   
+  @ShipperConfigElementDescription(
+    path = "/input/[]/source",
+    type = "dictionary",
+    description = "The type of the input source.",
+    examples = {"file", "s3_file"}
+  )
   @Expose
   private String source;
   
+  @ShipperConfigElementDescription(
+    path = "/input/[]/tail",
+    type = "boolean",
+    description = "The input should check for only the latest file matching the pattern, not all of them.",
+    examples = {"true", "false"},
+    defaultValue = "true"
+  )
   @Expose
   private Boolean tail;
   
+  @ShipperConfigElementDescription(
+    path = "/input/[]/gen_event_md5",
+    type = "boolean",
+    description = "Generate an event_md5 field for each row by creating a hash of the row data.",
+    examples = {"true", "false"},
+    defaultValue = "true"
+  )
   @Expose
   @SerializedName("gen_event_md5")
   private Boolean genEventMd5;
   
+  @ShipperConfigElementDescription(
+    path = "/input/[]/use_event_md5_as_id",
+    type = "boolean",
+    description = "Generate an id for each row by creating a hash of the row data.",
+    examples = {"true", "false"},
+    defaultValue = "false"
+  )
   @Expose
   @SerializedName("use_event_md5_as_id")
   private Boolean useEventMd5AsId;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/cache_enabled",
+    type = "boolean",
+    description = "Allows the input to use a cache to filter out duplications.",
+    examples = {"true", "false"},
+    defaultValue = "false"
+  )
   @Expose
   @SerializedName("cache_enabled")
   private Boolean cacheEnabled;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/cache_key_field",
+    type = "string",
+    description = "Specifies the field for which to use the cache to find duplications of.",
+    examples = {"some_field_prone_to_repeating_value"},
+    defaultValue = "log_message"
+  )
   @Expose
   @SerializedName("cache_key_field")
   private String cacheKeyField;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/cache_last_dedup_enabled",
+    type = "boolean",
+    description = "Allow to filter out entries which are same as the most recent one irrelevant of it's time.",
+    examples = {"true", "false"},
+    defaultValue = "false"
+  )
   @Expose
   @SerializedName("cache_last_dedup_enabled")
   private Boolean cacheLastDedupEnabled;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/cache_size",
+    type = "integer",
+    description = "The number of entries to store in the cache.",
+    examples = {"50"},
+    defaultValue = "100"
+  )
   @Expose
   @SerializedName("cache_size")
   private Integer cacheSize;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/cache_dedup_interval",
+    type = "integer",
+    description = "The maximum interval in ms which may pass between two identical log messages to filter the latter out.",
+    examples = {"500"},
+    defaultValue = "1000"
+  )
   @Expose
   @SerializedName("cache_dedup_interval")
   private Long cacheDedupInterval;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/is_enabled",
+    type = "boolean",
+    description = "A flag to show if the input should be used.",
+    examples = {"true", "false"},
+    defaultValue = "true"
+  )
   @Expose
   @SerializedName("is_enabled")
   private Boolean isEnabled;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
index 51c7ec8..8281daa 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
@@ -19,20 +19,47 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileBaseDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
+@ShipperConfigTypeDescription(
+  name = "File Input",
+  description = "File inputs have some additional parameters:"
+)
 public class InputFileBaseDescriptorImpl extends InputDescriptorImpl implements InputFileBaseDescriptor {
+  @ShipperConfigElementDescription(
+    path = "/input/[]/checkpoint_interval_ms",
+    type = "integer",
+    description = "The time interval in ms when the checkpoint file should be updated.",
+    examples = {"10000"},
+    defaultValue = "5000"
+  )
   @Expose
   @SerializedName("checkpoint_interval_ms")
   private Integer checkpointIntervalMs;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/process_file",
+    type = "boolean",
+    description = "Should the file be processed.",
+    examples = {"true", "false"},
+    defaultValue = "true"
+  )
   @Expose
   @SerializedName("process_file")
   private Boolean processFile;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/copy_file",
+    type = "boolean",
+    description = "Should the file be copied (only if not processed).",
+    examples = {"true", "false"},
+    defaultValue = "false"
+  )
   @Expose
   @SerializedName("copy_file")
   private Boolean copyFile;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
index 277a57c..19f52d3 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
@@ -19,16 +19,32 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.InputS3FileDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
+@ShipperConfigTypeDescription(
+  name = "S3 File Input",
+  description = "S3 file inputs have the following parameters in addition to the general file parameters:"
+)
 public class InputS3FileDescriptorImpl extends InputFileBaseDescriptorImpl implements InputS3FileDescriptor {
+  @ShipperConfigElementDescription(
+    path = "/input/[]/s3_access_key",
+    type = "string",
+    description = "The access key used for AWS credentials."
+  )
   @Expose
   @SerializedName("s3_access_key")
   private String s3AccessKey;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/s3_secret_key",
+    type = "string",
+    description = "The secret key used for AWS credentials."
+  )
   @Expose
   @SerializedName("s3_secret_key")
   private String s3SecretKey;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java
index 5fdbbab..8c128de 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java
@@ -19,20 +19,39 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.MapAnonymizeDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
-public class MapAnonymizeDescriptorImpl implements MapAnonymizeDescriptor {
+@ShipperConfigTypeDescription(
+    name = "Map Anonymize",
+    description = "The name of the mapping element should be map_anonymize. The value json element should contain the following parameter:"
+)
+public class MapAnonymizeDescriptorImpl extends MapFieldDescriptorImpl implements MapAnonymizeDescriptor {
   @Override
   public String getJsonName() {
     return "map_anonymize";
   }
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_anonymize/pattern",
+    type = "string",
+    description = "The pattern to use to identify parts to anonymize. The parts to hide should be marked with the \"<hide>\" string.",
+    examples = {"Some secret is here: <hide>, and another one is here: <hide>"}
+  )
   @Expose
   private String pattern;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_anonymize/hide_char",
+    type = "string",
+    description = "The character to hide with",
+    defaultValue = "*",
+    examples = {"X", "-"}
+  )
   @Expose
   @SerializedName("hide_char")
   private Character hideChar;

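To make the pattern / hide_char semantics above concrete, here is a small illustrative sketch that masks the parts marked by "<hide>" with the hide character (a sketch only, not the actual LogFeeder anonymize mapper, whose exact masking behaviour may differ):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class AnonymizeSketch {

  public static String anonymize(String line, String hidePattern, char hideChar) {
    // turn the "<hide>" markers into capture groups, quoting the literal parts
    String[] literals = hidePattern.split("<hide>", -1);
    StringBuilder regex = new StringBuilder();
    for (int i = 0; i < literals.length; i++) {
      regex.append(Pattern.quote(literals[i]));
      if (i < literals.length - 1) {
        // a trailing secret (pattern ends with <hide>) gets a greedy group
        boolean trailing = i == literals.length - 2 && literals[i + 1].isEmpty();
        regex.append(trailing ? "(.+)" : "(.+?)");
      }
    }
    Matcher matcher = Pattern.compile(regex.toString()).matcher(line);
    if (!matcher.find()) {
      return line;
    }
    // overwrite every captured (secret) character with the hide character
    StringBuilder masked = new StringBuilder(line);
    for (int group = 1; group <= matcher.groupCount(); group++) {
      for (int pos = matcher.start(group); pos < matcher.end(group); pos++) {
        masked.setCharAt(pos, hideChar);
      }
    }
    return masked.toString();
  }

  public static void main(String[] args) {
    // prints: password=******* user=admin
    System.out.println(anonymize("password=hunter2 user=admin", "password=<hide> ", '*'));
  }
}
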
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
index 2e54e7a..feec4b6 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
@@ -19,21 +19,39 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.MapDateDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
-public class MapDateDescriptorImpl implements MapDateDescriptor {
+@ShipperConfigTypeDescription(
+    name = "Map Date",
+    description = "The name of the mapping element should be map_date. The value json element may contain the following parameters:"
+)
+public class MapDateDescriptorImpl extends MapFieldDescriptorImpl implements MapDateDescriptor {
   @Override
   public String getJsonName() {
     return "map_date";
   }
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_date/src_date_pattern",
+    type = "string",
+    description = "If it is specified than the mapper converts from this format to the target, and also adds missing year",
+    examples = {"MMM dd HH:mm:ss"}
+  )
   @Expose
   @SerializedName("src_date_pattern")
   private String sourceDatePattern;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_date/target_date_pattern",
+    type = "string",
+    description = "If 'epoch' then the field is parsed as seconds from 1970, otherwise the content used as pattern",
+    examples = {"yyyy-MM-dd HH:mm:ss,SSS", "epoch"}
+  )
   @Expose
   @SerializedName("target_date_pattern")
   private String targetDatePattern;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
index 4a8d746..e7b8fdf 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
@@ -19,17 +19,29 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldCopyDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
-public class MapFieldCopyDescriptorImpl implements MapFieldCopyDescriptor {
+@ShipperConfigTypeDescription(
+    name = "Map Copy",
+    description = "The name of the mapping element should be map_copy. The value json element should contain the following parameter:"
+)
+public class MapFieldCopyDescriptorImpl extends MapFieldDescriptorImpl implements MapFieldCopyDescriptor {
   @Override
   public String getJsonName() {
     return "map_fieldcopy";
   }
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_copy/copy_name",
+    type = "string",
+    description = "The name of the copied field",
+    examples = {"new_name"}
+  )
   @Expose
   @SerializedName("copy_name")
   private String copyName;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldDescriptorImpl.java
new file mode 100644
index 0000000..101e0d4
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldDescriptorImpl.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+
+@ShipperConfigTypeDescription(
+    name = "Post Map Values",
+    description = "The Post Map Values element in the [filter](filter.md) field names as keys, the values are lists of sets of " +
+                  "post map values, each describing one mapping done on a field named before obtained after filtering.\n" +
+                  "\n" +
+                  "Currently there are four kind of mappings are supported:"
+  )
+public abstract class MapFieldDescriptorImpl implements MapFieldDescriptor {
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
index bd32018..e1b71e6 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
@@ -19,17 +19,29 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldNameDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
-public class MapFieldNameDescriptorImpl implements MapFieldNameDescriptor {
+@ShipperConfigTypeDescription(
+    name = "Map Field Name",
+    description = "The name of the mapping element should be map_fieldname. The value json element should contain the following parameter:"
+)
+public class MapFieldNameDescriptorImpl extends MapFieldDescriptorImpl implements MapFieldNameDescriptor {
   @Override
   public String getJsonName() {
     return "map_fieldname";
   }
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_fieldname/new_field_name",
+    type = "string",
+    description = "The name of the renamed field",
+    examples = {"new_name"}
+  )
   @Expose
   @SerializedName("new_field_name")
   private String newFieldName;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
index 599e152..a80a994 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
@@ -19,21 +19,39 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldValueDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
-public class MapFieldValueDescriptorImpl implements MapFieldValueDescriptor {
+@ShipperConfigTypeDescription(
+    name = "Map Field Value",
+    description = "The name of the mapping element should be map_fieldvalue. The value json element should contain the following parameter:"
+)
+public class MapFieldValueDescriptorImpl extends MapFieldDescriptorImpl implements MapFieldValueDescriptor {
   @Override
   public String getJsonName() {
     return "map_fieldvalue";
   }
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_fieldvalue/pre_value",
+    type = "string",
+    description = "The value that the field must match (ignoring case) to be mapped",
+    examples = {"old_value"}
+  )
   @Expose
   @SerializedName("pre_value")
   private String preValue;
 
+  @ShipperConfigElementDescription(
+      path = "/filter/[]/post_map_values/{field_name}/[]/map_fieldvalue/post_value",
+      type = "string",
+      description = "The value to which the field is modified to",
+      examples = {"new_value"}
+    )
   @Expose
   @SerializedName("post_value")
   private String postValue;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
index 3c21fd8..e3f9886 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
@@ -95,7 +95,7 @@ public class PostMapValuesAdapter implements JsonDeserializer<List<PostMapValues
   private JsonElement createMapperObject(PostMapValuesImpl postMapValues, JsonSerializationContext context) {
     JsonObject jsonObject = new JsonObject();
     for (MapFieldDescriptor m : postMapValues.getMappers()) {
-      jsonObject.add(((MapFieldDescriptor)m).getJsonName(), context.serialize(m));
+      jsonObject.add(((MapFieldDescriptorImpl)m).getJsonName(), context.serialize(m));
     }
     return jsonObject;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md b/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md
index 129279b..d825290 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md
@@ -48,6 +48,8 @@ Grok filters have the following additional parameters:
 
 ## Key-value Filter
 
+value\_borders is only used if it is specified, and value\_split is not.
+
 Key-value filters have the following additional parameters:
 
 | Field          | Description                                                                               | Default |
@@ -56,4 +58,4 @@ Key-value filters have the following additional parameters:
 | value\_split   | The string that separates keys from values                                                | "="     |
 | value\_borders | The borders around the value, must be 2 characters long, first before it, second after it | -       |
 
-If value\_borders is only used if it is specified, and value\_split is not.
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md b/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md
index 661eeb8..1a9ce8d 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md
@@ -20,20 +20,18 @@ limitations under the License.
 # Input
 
 The input element in the [input configuration](inputConfig.md) contains a list of input descriptions, each describing one source
-of input.
-
-The general elements in the json are the following:
+of input. The general elements in the json are the following:
 
 | Field                       | Description                                                                                           | Default      |
 |-----------------------------|-------------------------------------------------------------------------------------------------------|--------------|
-| type                        | The type of the input source, currently file and s3_file are supported                                | -            |
+| type                        | The log id for this source                                                                            | -            |
 | rowtype                     | The type of the row, can be service / audit                                                           | -            |
 | path                        | The path of the source, may contain '*' characters too                                                | -            |
 | add\_fields                 | The element contains field\_name: field\_value pairs which will be added to each rows data            | -            |
+| source                      | The type of the input source, currently file and s3_file are supported                                | -            |
 | tail                        | The input should check for only the latest file matching the pattern, not all of them                 | true         |
 | gen\_event\_md5             | Generate an event\_md5 field for each row by creating a hash of the row data                          | true         |
 | use\_event\_md5\_as\_id     | Generate an id for each row by creating a hash of the row data                                        | false        |
-| start\_position             | Should the parsing start from the beginning                                                           | beginning    |
 | cache\_enabled              | Allows the input to use a cache to filter out duplications                                            | true         |
 | cache\_key\_field           | Specifies the field for which to use the cache to find duplications of                                | log\_message |
 | cache\_last\_dedup\_enabled | Allow to filter out entries which are same as the most recent one irrelevant of it's time             | false        |
@@ -44,7 +42,7 @@ The general elements in the json are the following:
 
 ## File Input
 
-File inputs have the following parameters too:
+File inputs have some additional parameters:
 
 | Field                    | Description                                                        | Default |
 |--------------------------|--------------------------------------------------------------------|---------|

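As a concrete illustration of the fields documented in the table above, the snippet below builds one input description with Gson (already used elsewhere in this patch). The exact place such an object sits inside a shipper configuration, and the sample values, are assumptions made only for the example.

    import com.google.gson.JsonObject;

    public class InputDescriptionSketch {
      public static void main(String[] args) {
        JsonObject input = new JsonObject();
        input.addProperty("type", "hdfs_namenode");    // the log id for this source
        input.addProperty("rowtype", "service");       // service / audit
        input.addProperty("path", "/var/log/hadoop/hdfs/hadoop-hdfs-namenode-*.log");
        input.addProperty("source", "file");           // file or s3_file
        input.addProperty("tail", true);               // only the latest file matching the pattern
        System.out.println(input);
      }
    }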
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md b/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md
index 7ec439a..bc219df 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md
@@ -67,4 +67,4 @@ The name of the mapping element should be map\_anonymize. The value json element
 | Field      | Description                                                                                                     |
 |------------|-----------------------------------------------------------------------------------------------------------------|
 | pattern    | The pattern to use to identify parts to anonymize. The parts to hide should be marked with the "<hide>" string. |
-| hide\_char | The character to hide with, if it is not specified then the default is 'X'                                      |
+| hide\_char | The character to hide with, if it is not specified then the default is '*'                                      |

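For reference, a rough sketch of the anonymization idea described above: the value captured by the pattern is masked with the hide character, which now defaults to '*'. This is illustrative only, with an invented log line and regex, and is not the map_anonymize mapper itself.

    public class AnonymizeSketch {
      public static void main(String[] args) {
        String line = "login succeeded for user=admin password=secret123";
        // the part marked with <hide> in the real pattern corresponds to the captured value here;
        // it is replaced with a fixed run of the hide character ('*' by default)
        String anonymized = line.replaceAll("(password=)\\S+", "$1********");
        System.out.println(anonymized);   // login succeeded for user=admin password=********
      }
    }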
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
index 7abf177..acc3d4d 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
@@ -131,13 +131,15 @@ public class FilterJSONTest {
   @Test
   public void testJSONFilterCode_invalidJson() throws Exception {
     LOG.info("testJSONFilterCode_invalidJson()");
+    
     init(new FilterJsonDescriptorImpl());
-    String inputStr="invalid json";
+    
+    String inputStr = "invalid json";
     try{
-    filterJson.apply(inputStr,new InputMarker(null, null, 0));
-    fail("Expected LogFeederException was not occured");
-    }catch(LogFeederException logFeederException){
-      assertEquals("Json parsing failed for inputstr = "+inputStr, logFeederException.getLocalizedMessage());
+      filterJson.apply(inputStr,new InputMarker(null, null, 0));
+      fail("Expected LogFeederException was not occured");
+    } catch(LogFeederException logFeederException) {
+      assertEquals("Json parsing failed for inputstr = " + inputStr, logFeederException.getLocalizedMessage());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/ShipperConfigDescriptionStorage.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/ShipperConfigDescriptionStorage.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/ShipperConfigDescriptionStorage.java
new file mode 100644
index 0000000..7d4bc2c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/ShipperConfigDescriptionStorage.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.common;
+
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.model.response.ShipperConfigDescriptionData;
+import org.reflections.Reflections;
+import org.reflections.scanners.FieldAnnotationsScanner;
+
+import javax.annotation.PostConstruct;
+import javax.inject.Named;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+@Named
+public class ShipperConfigDescriptionStorage {
+
+  private static final String SHIPPER_CONFIG_PACKAGE = "org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl";
+  
+  private final List<ShipperConfigDescriptionData> shipperConfigDescription = new ArrayList<>();
+
+  @PostConstruct
+  public void postConstruct() {
+    Thread loadShipperConfigDescriptionThread = new Thread("load_shipper_config_description") {
+      @Override
+      public void run() {
+        fillShipperConfigDescriptions();
+      }
+    };
+    loadShipperConfigDescriptionThread.setDaemon(true);
+    loadShipperConfigDescriptionThread.start();
+  }
+
+  public List<ShipperConfigDescriptionData> getShipperConfigDescription() {
+    return shipperConfigDescription;
+  }
+
+  private void fillShipperConfigDescriptions() {
+    Reflections reflections = new Reflections(SHIPPER_CONFIG_PACKAGE, new FieldAnnotationsScanner());
+    Set<Field> fields = reflections.getFieldsAnnotatedWith(ShipperConfigElementDescription.class);
+    for (Field field : fields) {
+      ShipperConfigElementDescription description = field.getAnnotation(ShipperConfigElementDescription.class);
+      shipperConfigDescription.add(new ShipperConfigDescriptionData(description.path(), description.description(),
+          description.examples(), description.defaultValue()));
+    }
+    
+    shipperConfigDescription.sort((o1, o2) -> o1.getPath().compareTo(o2.getPath()));
+  }
+}

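To show what this storage actually discovers, here is a hypothetical annotated field of the kind fillShipperConfigDescriptions() scans for. The annotation element names (path, description, examples, defaultValue) come from the code above, but this particular class, its field and the values are invented for illustration.

    package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;

    import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;

    public class ExampleInputDescriptorImpl {

      @ShipperConfigElementDescription(
        path = "/input/[]/tail",
        description = "The input should check for only the latest file matching the pattern, not all of them",
        examples = {"true", "false"},
        defaultValue = "true"
      )
      private boolean tail;
    }

Because the class sits in the scanned package, the Reflections field scanner above would pick the field up and expose it as a ShipperConfigDescriptionData entry.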
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
index 6d1382d..da0a8bb 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
@@ -104,6 +104,7 @@ public class DocConstants {
     public static final String GET_AUTH_DETAILS_OD = "Get authentication details.";
     public static final String GET_ALL_PROPERTIES_INFO_OD = "List all available properties for Log Search and Log Feeder";
     public static final String GET_LOGSEARCH_PROPERTIES_INFO_OD = "List all available properties for Log Search property file (e.g: logsearch.properties/logfeeder.properties)";
+    public static final String GET_ALL_SHIPPER_CONFIG_INFO_OD = "List all available shipper configuration element";
   }
 
   public class EventHistoryDescriptions {

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java
index f6d0449..2f63492 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java
@@ -25,7 +25,9 @@ import java.util.Map;
 
 import org.apache.ambari.logsearch.conf.AuthPropsConfig;
 import org.apache.ambari.logsearch.common.PropertyDescriptionStorage;
+import org.apache.ambari.logsearch.common.ShipperConfigDescriptionStorage;
 import org.apache.ambari.logsearch.model.response.PropertyDescriptionData;
+import org.apache.ambari.logsearch.model.response.ShipperConfigDescriptionData;
 
 import javax.inject.Inject;
 import javax.inject.Named;
@@ -39,6 +41,9 @@ public class InfoManager extends JsonManagerBase {
   @Inject
   private PropertyDescriptionStorage propertyDescriptionStore;
 
+  @Inject
+  private ShipperConfigDescriptionStorage shipperConfigDescriptionStore;
+
   public Map<String, Boolean> getAuthMap() {
     Map<String, Boolean> authMap = new HashMap<>();
     authMap.put("external", authPropsConfig.isAuthExternalEnabled());
@@ -56,4 +61,8 @@ public class InfoManager extends JsonManagerBase {
   public List<PropertyDescriptionData> getLogSearchPropertyDescriptions(String propertiesFile) {
     return getPropertyDescriptions().get(propertiesFile);
   }
+  
+  public List<ShipperConfigDescriptionData> getLogSearchShipperConfigDescription() {
+    return shipperConfigDescriptionStore.getShipperConfigDescription();
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/response/ShipperConfigDescriptionData.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/response/ShipperConfigDescriptionData.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/response/ShipperConfigDescriptionData.java
new file mode 100644
index 0000000..91f7420
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/response/ShipperConfigDescriptionData.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.model.response;
+
+public class ShipperConfigDescriptionData {
+  private final String path;
+
+  private final String description;
+
+  private final String[] examples;
+
+  private final String defaultValue;
+
+  public ShipperConfigDescriptionData(String path, String description, String[] examples, String defaultValue) {
+    this.path = path;
+    this.description = description;
+    this.examples = examples;
+    this.defaultValue = defaultValue;
+  }
+
+  public String getPath() {
+    return path;
+  }
+
+  public String getDescription() {
+    return description;
+  }
+
+  public String[] getExamples() {
+    return examples;
+  }
+
+  public String getDefaultValue() {
+    return defaultValue;
+  }
+}

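A minimal usage sketch of the DTO above, assuming the caller sits in the same package; the sample values mirror the Log Feeder input documentation earlier in this patch and are only an example.

    public class ShipperConfigDescriptionDataExample {
      public static void main(String[] args) {
        ShipperConfigDescriptionData data = new ShipperConfigDescriptionData(
            "/input/[]/tail",
            "The input should check for only the latest file matching the pattern, not all of them",
            new String[] {"true", "false"},
            "true");
        System.out.println(data.getPath() + " (default: " + data.getDefaultValue() + ")");
      }
    }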
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java
index 6ea0bab..e49be90 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java
@@ -29,12 +29,14 @@ import io.swagger.annotations.Api;
 import io.swagger.annotations.ApiOperation;
 import org.apache.ambari.logsearch.manager.InfoManager;
 import org.apache.ambari.logsearch.model.response.PropertyDescriptionData;
+import org.apache.ambari.logsearch.model.response.ShipperConfigDescriptionData;
 import org.springframework.context.annotation.Scope;
 
 import java.util.List;
 import java.util.Map;
 
 import static org.apache.ambari.logsearch.doc.DocConstants.PublicOperationDescriptions.GET_ALL_PROPERTIES_INFO_OD;
+import static org.apache.ambari.logsearch.doc.DocConstants.PublicOperationDescriptions.GET_ALL_SHIPPER_CONFIG_INFO_OD;
 import static org.apache.ambari.logsearch.doc.DocConstants.PublicOperationDescriptions.GET_LOGSEARCH_PROPERTIES_INFO_OD;
 import static org.apache.ambari.logsearch.doc.DocConstants.PublicOperationDescriptions.GET_AUTH_DETAILS_OD;
 
@@ -70,4 +72,12 @@ public class InfoResource {
   public List<PropertyDescriptionData> getPropertyFileDescription(@PathParam("propertyFile") String propertyFile) {
     return infoManager.getLogSearchPropertyDescriptions(propertyFile);
   }
+
+  @GET
+  @Path("/shipperconfig")
+  @Produces({"application/json"})
+  @ApiOperation(GET_ALL_SHIPPER_CONFIG_INFO_OD)
+  public List<ShipperConfigDescriptionData> getShipperConfigDescription() {
+    return infoManager.getLogSearchShipperConfigDescription();
+  }
 }

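A minimal sketch of calling the new endpoint with the plain JAX-RS client API; the host/port placeholder and the api/v1/info prefix are assumptions about how the resource is mounted, since only the @Path("/shipperconfig") suffix is visible in this diff.

    import javax.ws.rs.client.Client;
    import javax.ws.rs.client.ClientBuilder;

    public class ShipperConfigInfoClient {
      public static void main(String[] args) {
        Client client = ClientBuilder.newClient();
        String json = client
            .target("http://localhost:61888")          // placeholder Log Search host:port
            .path("api/v1/info/shipperconfig")         // assumed prefix + the /shipperconfig path above
            .request("application/json")
            .get(String.class);
        System.out.println(json);
        client.close();
      }
    }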

[32/36] ambari git commit: AMBARI-21445. Fixes the following bugs: (1). Make Hive Kerberos keytab files group non-readable (2). HiveServer2 Authentication via LDAP to work correctly (3). Remove leading white spaces for the hive-env and hive-interactive-

Posted by lp...@apache.org.
AMBARI-21445. Fixes the following bugs: (1). Make Hive Kerberos keytab files group non-readable (2). HiveServer2 Authentication via LDAP to work correctly (3). Remove leading white spaces for the hive-env and hive-interactive-env template.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/eb3d3ea6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/eb3d3ea6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/eb3d3ea6

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: eb3d3ea6e5eb9464a135f851658d4aa5b3988efa
Parents: 9f788c3
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Tue Jul 11 15:37:08 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Wed Jul 12 11:55:44 2017 -0700

----------------------------------------------------------------------
 .../0.12.0.2.0/package/scripts/params_linux.py  |   4 +
 .../0.12.0.2.0/package/scripts/service_check.py |   3 +-
 .../services/HIVE/configuration/hive-env.xml    |  78 +++++-----
 .../HIVE/configuration/hive-interactive-env.xml |  62 ++++----
 .../stacks/HDP/2.6/services/HIVE/kerberos.json  | 151 +++++++++++++++++++
 .../stacks/HDP/2.6/services/YARN/kerberos.json  |   2 +-
 6 files changed, 228 insertions(+), 72 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 21b3d8b..9939536 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -849,3 +849,7 @@ ranger_hive_metastore_lookup = default('/configurations/ranger-hive-plugin-prope
 
 if security_enabled:
   hive_metastore_principal_with_host = hive_metastore_principal.replace('_HOST', hostname.lower())
+
+# For ldap - hive_check
+hive_ldap_user= config['configurations']['hive-env'].get('alert_ldap_username','')
+hive_ldap_passwd=config['configurations']['hive-env'].get('alert_ldap_password','')
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
index d144c34..271fff9 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
@@ -123,7 +123,8 @@ class HiveServiceCheckDefault(HiveServiceCheck):
                                params.hive_server_principal, kinit_cmd, params.smokeuser,
                                transport_mode=params.hive_transport_mode, http_endpoint=params.hive_http_endpoint,
                                ssl=params.hive_ssl, ssl_keystore=ssl_keystore,
-                               ssl_password=ssl_password)
+                               ssl_password=ssl_password, ldap_username=params.hive_ldap_user,
+                               ldap_password=params.hive_ldap_passwd)
         Logger.info("Successfully connected to {0} on port {1}".format(address, server_port))
         workable_server_available = True
       except:

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
index a6cf1bc..929c10d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
@@ -60,56 +60,56 @@
     <display-name>hive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
+if [ "$SERVICE" = "cli" ]; then
+  if [ -z "$DEBUG" ]; then
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
+  else
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+  fi
+fi
 
-      # The heap size of the jvm stared by hive shell script can be controlled via:
+# The heap size of the jvm stared by hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
+export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
 
-      # Folder containing extra libraries required for hive compilation/execution can be controlled by:
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
-          export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-        elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-          export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-        fi
-      elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-      fi
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
+    export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+  elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+    export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+  fi
+elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
+export METASTORE_PORT={{hive_metastore_port}}
 
-      {% if sqla_db_used or lib_dir_available %}
-      export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      {% endif %}
+{% if sqla_db_used or lib_dir_available %}
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
+export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
index ada4859..86720f4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
@@ -100,47 +100,47 @@
     <display-name>hive-interactive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+if [ "$SERVICE" = "cli" ]; then
+  if [ -z "$DEBUG" ]; then
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+  else
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+  fi
+fi
 
-      # The heap size of the jvm stared by hive shell script can be controlled via:
+# The heap size of the jvm stared by hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+  export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
 
-      # Add additional hcatalog jars
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-      else
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
-      fi
+# Add additional hcatalog jars
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+else
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
+export METASTORE_PORT={{hive_metastore_port}}
 
-      # Spark assembly contains a conflicting copy of HiveConf from hive-1.2
-      export HIVE_SKIP_SPARK_ASSEMBLY=true
+# Spark assembly contains a conflicting copy of HiveConf from hive-1.2
+export HIVE_SKIP_SPARK_ASSEMBLY=true
 
     </value>
     <value-attributes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
new file mode 100644
index 0000000..b6e57e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
@@ -0,0 +1,151 @@
+{
+  "services": [
+    {
+      "name": "HIVE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hive-site": {
+            "hive.metastore.sasl.enabled": "true",
+            "hive.server2.authentication": "KERBEROS"
+          }
+        },
+        {
+          "ranger-hive-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HIVE_METASTORE",
+          "identities": [
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-site/hive.metastore.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.metastore.kerberos.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "hive_server_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type": "service",
+                "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
+                "local_username": "${hive-env/hive_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.service.keytab",
+                "owner": {
+                  "name": "${hive-env/hive_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hive-site/hive.server2.authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "atlas_kafka",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
+              },
+              "keytab": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.keytab"
+              }
+            },
+            {
+              "name": "ranger_audit",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER_INTERACTIVE",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive"
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/spnego"
+            },
+            {
+              "name": "/YARN/NODEMANAGER/llap_zk_hive"
+            }
+          ]
+        },
+        {
+          "name": "WEBHCAT_SERVER",
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "webhcat-site/templeton.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "webhcat-site/templeton.kerberos.keytab"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "core-site": {
+                "hadoop.proxyuser.HTTP.hosts": "${clusterHostInfo/webhcat_server_host|append(core-site/hadoop.proxyuser.HTTP.hosts, \\\\,, true)}"
+              }
+            },
+            {
+              "webhcat-site": {
+                "templeton.kerberos.secret": "secret",
+                "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=${clusterHostInfo/hive_metastore_host|each(thrift://%s:9083, \\\\,, \\s*\\,\\s*)},hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
index b1501b8..60d50eb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
@@ -117,7 +117,7 @@
                 },
                 "group": {
                   "name": "${cluster-env/user_group}",
-                  "access": "r"
+                  "access": ""
                 },
                 "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
               },


[33/36] ambari git commit: AMBARI-21451 - Expected Values Like original_stack Are Missing On Downgrades (jonathanhurley)

Posted by lp...@apache.org.
AMBARI-21451 - Expected Values Like original_stack Are Missing On Downgrades (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f27f3aff
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f27f3aff
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f27f3aff

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: f27f3affbb4c7f49944dcefc7581ac228b103e3f
Parents: eb3d3ea
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jul 12 13:30:16 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 12 19:26:37 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_features.py       |  28 ++--
 .../controller/ActionExecutionContext.java      |  30 ++--
 .../controller/AmbariActionExecutionHelper.java |  15 +-
 .../ClusterStackVersionResourceProvider.java    |   2 +-
 .../upgrades/UpgradeUserKerberosDescriptor.java | 142 +++++++------------
 .../ambari/server/state/UpgradeContext.java     |  16 ++-
 .../SPARK/1.2.1/package/scripts/params.py       |  11 +-
 .../SPARK/1.2.1/package/scripts/setup_spark.py  |   6 +-
 .../1.2.1/package/scripts/spark_service.py      |   6 +-
 .../UpgradeUserKerberosDescriptorTest.java      |  59 ++++++--
 .../src/test/python/TestStackFeature.py         |  44 ++++--
 .../test/python/stacks/2.0.6/configs/nn_eu.json |   2 +-
 .../stacks/2.0.6/configs/nn_eu_standby.json     |   2 +-
 .../2.1/configs/hive-metastore-upgrade.json     |   2 +-
 .../python/stacks/2.2/configs/knox_upgrade.json |   2 +-
 15 files changed, 199 insertions(+), 168 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index 576c138..24201dd 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -105,7 +105,10 @@ def get_stack_feature_version(config):
 
   # something like 2.4.0.0-1234; represents the version for the command
   # (or None if this is a cluster install and it hasn't been calculated yet)
-  version = default("/commandParams/version", None)
+  # this is always guaranteed to be the correct version for the command, even in
+  # upgrade and downgrade scenarios
+  command_version = default("/commandParams/version", None)
+  command_stack = default("/commandParams/target_stack", None)
 
   # something like 2.4.0.0-1234
   # (or None if this is a cluster install and it hasn't been calculated yet)
@@ -115,13 +118,13 @@ def get_stack_feature_version(config):
   upgrade_direction = default("/commandParams/upgrade_direction", None)
 
   # start out with the value that's right 99% of the time
-  version_for_stack_feature_checks = version if version is not None else stack_version
+  version_for_stack_feature_checks = command_version if command_version is not None else stack_version
 
   # if this is not an upgrade, then we take the simple path
   if upgrade_direction is None:
     Logger.info(
-      "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2} -> {3}".format(
-        stack_version, version, current_cluster_version, version_for_stack_feature_checks))
+      "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}-> {4}".format(
+        stack_version, current_cluster_version, command_stack, command_version, version_for_stack_feature_checks))
 
     return version_for_stack_feature_checks
 
@@ -130,15 +133,12 @@ def get_stack_feature_version(config):
   is_stop_command = _is_stop_command(config)
   if not is_stop_command:
     Logger.info(
-      "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2}, upgrade_direction={3} -> {4}".format(
-        stack_version, version, current_cluster_version, upgrade_direction,
+      "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}, Upgrade Direction={4} -> {5}".format(
+        stack_version, current_cluster_version, command_stack, command_version, upgrade_direction,
         version_for_stack_feature_checks))
 
     return version_for_stack_feature_checks
 
-  original_stack = default("/commandParams/original_stack", None)
-  target_stack = default("/commandParams/target_stack", None)
-
   # something like 2.5.0.0-5678 (or None)
   downgrade_from_version = default("/commandParams/downgrade_from_version", None)
 
@@ -154,15 +154,13 @@ def get_stack_feature_version(config):
     # UPGRADE
     if current_cluster_version is not None:
       version_for_stack_feature_checks = current_cluster_version
-    elif original_stack is not None:
-      version_for_stack_feature_checks = format_stack_version(original_stack)
     else:
-      version_for_stack_feature_checks = version if version is not None else stack_version
+      version_for_stack_feature_checks = command_version if command_version is not None else stack_version
 
   Logger.info(
-    "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2}, upgrade_direction={3}, original_stack={4}, target_stack={5}, downgrade_from_version={6}, stop_command={7} -> {8}".format(
-      stack_version, version, current_cluster_version, upgrade_direction, original_stack,
-      target_stack, downgrade_from_version, is_stop_command, version_for_stack_feature_checks))
+    "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}, Upgrade Direction={4}, stop_command={5} -> {6}".format(
+      stack_version, current_cluster_version, command_stack, command_version, upgrade_direction,
+      is_stop_command, version_for_stack_feature_checks))
 
   return version_for_stack_feature_checks
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index 34d6db9..5d71869 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@ -27,7 +27,7 @@ import org.apache.ambari.server.actionmanager.TargetHostType;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.controller.internal.RequestOperationLevel;
 import org.apache.ambari.server.controller.internal.RequestResourceFilter;
-import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 
 /**
  * The context required to create tasks and stages for a custom action
@@ -44,7 +44,7 @@ public class ActionExecutionContext {
   private String expectedComponentName;
   private boolean hostsInMaintenanceModeExcluded = true;
   private boolean allowRetry = false;
-  private StackId stackId;
+  private RepositoryVersionEntity repositoryVersion;
 
   private List<ExecutionCommandVisitor> m_visitors = new ArrayList<>();
 
@@ -175,27 +175,29 @@ public class ActionExecutionContext {
   }
 
   /**
-   * Gets the stack to use for generating stack-associated values for a command.
-   * In some cases the cluster's stack is not the correct one to use, such as
-   * when distributing a repository.
+   * Gets the stack/version to use for generating stack-associated values for a
+   * command. In some cases the cluster's stack is not the correct one to use,
+   * such as when distributing a repository.
    *
-   * @return the stackId the stack to use when generating stack-specific content
-   *         for the command.
+   * @return the repository for the stack/version to use when generating
+   *         stack-specific content for the command.
+   *
+   * @return
    */
-  public StackId getStackId() {
-    return stackId;
+  public RepositoryVersionEntity getRepositoryVersion() {
+    return repositoryVersion;
   }
 
   /**
-   * Sets the stack to use for generating stack-associated values for a command.
-   * In some cases the cluster's stack is not the correct one to use, such as
-   * when distributing a repository.
+   * Sets the stack/version to use for generating stack-associated values for a
+   * command. In some cases the cluster's stack is not the correct one to use,
+   * such as when distributing a repository.
    *
    * @param stackId
    *          the stackId to use for stack-based properties on the command.
    */
-  public void setStackId(StackId stackId) {
-    this.stackId = stackId;
+  public void setRepositoryVersion(RepositoryVersionEntity repositoryVersion) {
+    this.repositoryVersion = repositoryVersion;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 391daa9..55356c7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -539,13 +539,18 @@ public class AmbariActionExecutionHelper {
     // if the repo is null, see if any values from the context should go on the
     // host params and then return
     if (null == repositoryVersion) {
-      if (null != actionContext.getStackId()) {
-        StackId stackId = actionContext.getStackId();
+      // see if the action context has a repository set to use for the command
+      if (null != actionContext.getRepositoryVersion()) {
+        StackId stackId = actionContext.getRepositoryVersion().getStackId();
         hostLevelParams.put(STACK_NAME, stackId.getStackName());
         hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
       }
 
       return;
+    } else {
+      StackId stackId = repositoryVersion.getStackId();
+      hostLevelParams.put(STACK_NAME, stackId.getStackName());
+      hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
     }
 
     JsonObject rootJsonObject = new JsonObject();
@@ -569,11 +574,5 @@ public class AmbariActionExecutionHelper {
     }
 
     hostLevelParams.put(REPO_INFO, rootJsonObject.toString());
-
-    // set the host level params if not already set by whoever is creating this command
-    if (!hostLevelParams.containsKey(STACK_NAME) || !hostLevelParams.containsKey(STACK_VERSION)) {
-      hostLevelParams.put(STACK_NAME, repositoryVersion.getStackName());
-      hostLevelParams.put(STACK_VERSION, repositoryVersion.getStackVersion());
-    }
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index c4fce8a..9ecea95 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -613,7 +613,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), roleParams);
 
-    actionContext.setStackId(stackId);
+    actionContext.setRepositoryVersion(repoVersion);
     actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
 
     repoVersionHelper.addCommandRepository(actionContext, osFamily, repoVersion, repoInfo);

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
index 59690a3..78aaa77 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
@@ -22,6 +22,8 @@ import java.util.List;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentMap;
 
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
@@ -29,10 +31,10 @@ import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.dao.ArtifactDAO;
 import org.apache.ambari.server.orm.entities.ArtifactEntity;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorUpdateHelper;
@@ -48,34 +50,9 @@ import com.google.inject.Inject;
  *
  * @see org.apache.ambari.server.state.kerberos.KerberosDescriptorUpdateHelper
  */
-public class UpgradeUserKerberosDescriptor extends AbstractServerAction {
+public class UpgradeUserKerberosDescriptor extends AbstractUpgradeServerAction {
   private static final Logger LOG = LoggerFactory.getLogger(UpgradeUserKerberosDescriptor.class);
 
-  /**
-   * The upgrade direction.
-   *
-   * @see Direction
-   */
-  private static final String UPGRADE_DIRECTION_KEY = "upgrade_direction";
-
-  /**
-   * The original "current" stack of the cluster before the upgrade started.
-   * This is the same regardless of whether the current direction is
-   * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
-   *
-   * @see Direction
-   */
-  private static final String ORIGINAL_STACK_KEY = "original_stack";
-
-  /**
-   * The target upgrade stack before the upgrade started. This is the same
-   * regardless of whether the current direction is {@link Direction#UPGRADE} or
-   * {@link Direction#DOWNGRADE}.
-   *
-   * @see Direction
-   */
-  private static final String TARGET_STACK_KEY = "target_stack";
-
   private final static String KERBEROS_DESCRIPTOR_NAME = "kerberos_descriptor";
   private final static String KERBEROS_DESCRIPTOR_BACKUP_NAME = "kerberos_descriptor_backup";
 
@@ -108,70 +85,73 @@ public class UpgradeUserKerberosDescriptor extends AbstractServerAction {
     List<String> messages = new ArrayList<>();
     List<String> errorMessages = new ArrayList<>();
 
-    if (cluster != null) {
-      logMessage(messages, "Obtaining the user-defined Kerberos descriptor");
+    UpgradeContext upgradeContext = getUpgradeContext(cluster);
 
-      TreeMap<String, String> foreignKeys = new TreeMap<>();
-      foreignKeys.put("cluster", String.valueOf(cluster.getClusterId()));
+    logMessage(messages, "Obtaining the user-defined Kerberos descriptor");
 
-      ArtifactEntity entity = artifactDAO.findByNameAndForeignKeys("kerberos_descriptor", foreignKeys);
-      KerberosDescriptor userDescriptor = (entity == null) ? null : kerberosDescriptorFactory.createInstance(entity.getArtifactData());
+    TreeMap<String, String> foreignKeys = new TreeMap<>();
+    foreignKeys.put("cluster", String.valueOf(cluster.getClusterId()));
 
-      if (userDescriptor != null) {
-        StackId originalStackId = getStackIdFromCommandParams(ORIGINAL_STACK_KEY);
-        StackId targetStackId = getStackIdFromCommandParams(TARGET_STACK_KEY);
+    ArtifactEntity entity = artifactDAO.findByNameAndForeignKeys("kerberos_descriptor", foreignKeys);
+    KerberosDescriptor userDescriptor = (entity == null) ? null : kerberosDescriptorFactory.createInstance(entity.getArtifactData());
 
-        if (isDowngrade()) {
-          restoreDescriptor(foreignKeys, messages, errorMessages);
-        } else {
-          backupDescriptor(foreignKeys, messages, errorMessages);
+    if (userDescriptor != null) {
+
+      @Experimental(
+          feature = ExperimentalFeature.PATCH_UPGRADES,
+          comment = "This needs to be correctly done per-service")
+
+      StackId originalStackId = cluster.getCurrentStackVersion();
+      StackId targetStackId = upgradeContext.getRepositoryVersion().getStackId();
+
+      if (upgradeContext.getDirection() == Direction.DOWNGRADE) {
+        restoreDescriptor(foreignKeys, messages, errorMessages);
+      } else {
+        backupDescriptor(foreignKeys, messages, errorMessages);
 
-          KerberosDescriptor newDescriptor = null;
-          KerberosDescriptor previousDescriptor = null;
+        KerberosDescriptor newDescriptor = null;
+        KerberosDescriptor previousDescriptor = null;
 
-          if (targetStackId == null) {
-            logErrorMessage(messages, errorMessages, "The new stack version information was not found.");
-          } else {
-            logMessage(messages, String.format("Obtaining new stack Kerberos descriptor for %s.", targetStackId.toString()));
-            newDescriptor = ambariMetaInfo.getKerberosDescriptor(targetStackId.getStackName(), targetStackId.getStackVersion());
+        if (targetStackId == null) {
+          logErrorMessage(messages, errorMessages, "The new stack version information was not found.");
+        } else {
+          logMessage(messages, String.format("Obtaining new stack Kerberos descriptor for %s.", targetStackId.toString()));
+          newDescriptor = ambariMetaInfo.getKerberosDescriptor(targetStackId.getStackName(), targetStackId.getStackVersion());
 
-            if (newDescriptor == null) {
-              logErrorMessage(messages, errorMessages, String.format("The Kerberos descriptor for the new stack version, %s, was not found.", targetStackId.toString()));
-            }
+          if (newDescriptor == null) {
+            logErrorMessage(messages, errorMessages, String.format("The Kerberos descriptor for the new stack version, %s, was not found.", targetStackId.toString()));
           }
+        }
 
-          if (originalStackId == null) {
-            logErrorMessage(messages, errorMessages, "The previous stack version information was not found.");
-          } else {
-            logMessage(messages, String.format("Obtaining previous stack Kerberos descriptor for %s.", originalStackId.toString()));
-            previousDescriptor = ambariMetaInfo.getKerberosDescriptor(originalStackId.getStackName(), originalStackId.getStackVersion());
+        if (originalStackId == null) {
+          logErrorMessage(messages, errorMessages, "The previous stack version information was not found.");
+        } else {
+          logMessage(messages, String.format("Obtaining previous stack Kerberos descriptor for %s.", originalStackId.toString()));
+          previousDescriptor = ambariMetaInfo.getKerberosDescriptor(originalStackId.getStackName(), originalStackId.getStackVersion());
 
-            if (newDescriptor == null) {
-              logErrorMessage(messages, errorMessages, String.format("The Kerberos descriptor for the previous stack version, %s, was not found.", originalStackId.toString()));
-            }
+          if (newDescriptor == null) {
+            logErrorMessage(messages, errorMessages, String.format("The Kerberos descriptor for the previous stack version, %s, was not found.", originalStackId.toString()));
           }
+        }
 
-          if (errorMessages.isEmpty()) {
-            logMessage(messages, "Updating the user-specified Kerberos descriptor.");
+        if (errorMessages.isEmpty()) {
+          logMessage(messages, "Updating the user-specified Kerberos descriptor.");
 
-            KerberosDescriptor updatedDescriptor = KerberosDescriptorUpdateHelper.updateUserKerberosDescriptor(
-                previousDescriptor,
-                newDescriptor,
-                userDescriptor);
+          KerberosDescriptor updatedDescriptor = KerberosDescriptorUpdateHelper.updateUserKerberosDescriptor(
+              previousDescriptor,
+              newDescriptor,
+              userDescriptor);
 
-            logMessage(messages, "Storing updated user-specified Kerberos descriptor.");
+          logMessage(messages, "Storing updated user-specified Kerberos descriptor.");
 
-            entity.setArtifactData(updatedDescriptor.toMap());
-            artifactDAO.merge(entity);
+          entity.setArtifactData(updatedDescriptor.toMap());
+          artifactDAO.merge(entity);
 
-            logMessage(messages, "Successfully updated the user-specified Kerberos descriptor.");
-          }
+          logMessage(messages, "Successfully updated the user-specified Kerberos descriptor.");
         }
-      } else {
-        logMessage(messages, "A user-specified Kerberos descriptor was not found. No updates are necessary.");
       }
     } else {
-      logErrorMessage(messages, errorMessages, String.format("The cluster named %s was not found.", clusterName));
+      logMessage(messages, "A user-specified Kerberos descriptor was not found. No updates are necessary.");
     }
 
     if (!errorMessages.isEmpty()) {
@@ -181,24 +161,6 @@ public class UpgradeUserKerberosDescriptor extends AbstractServerAction {
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", StringUtils.join(messages, "\n"), StringUtils.join(errorMessages, "\n"));
   }
 
-  /**
-   * Determines if upgrade direction is {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
-   *
-   * @return {@code true} if {@link Direction#DOWNGRADE}; {@code false} if {@link Direction#UPGRADE}
-   */
-  private boolean isDowngrade() {
-    return Direction.DOWNGRADE.name().equalsIgnoreCase(getCommandParameterValue(UPGRADE_DIRECTION_KEY));
-  }
-
-  private StackId getStackIdFromCommandParams(String commandParamKey) {
-    String stackId = getCommandParameterValue(commandParamKey);
-    if (stackId == null) {
-      return null;
-    } else {
-      return new StackId(stackId);
-    }
-  }
-
   private void logMessage(List<String> messages, String message) {
     LOG.info(message);
     messages.add(message);
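
For context on what the reworked server action above does: it reconciles three Kerberos descriptors - the previous stack's defaults, the new stack's defaults, and the user-customized descriptor stored as a cluster artifact - via KerberosDescriptorUpdateHelper.updateUserKerberosDescriptor(). The snippet below is a purely illustrative Python analog of that kind of three-way merge over flat dicts, written for this digest under a simple assumed rule (keep user edits, otherwise adopt the new stack default); the real helper operates on full Kerberos descriptors and has richer rules.

# Illustrative only: a naive three-way merge over flat dicts, standing in for
# the role KerberosDescriptorUpdateHelper.updateUserKerberosDescriptor() plays
# in the Java code above. Not Ambari code.
def merge_descriptor(previous_default, new_default, user):
  merged = {}
  for key in set(previous_default) | set(new_default) | set(user):
    if key in user and user.get(key) != previous_default.get(key):
      merged[key] = user[key]          # user customized it: keep the customization
    elif key in new_default:
      merged[key] = new_default[key]   # untouched by the user: adopt the new stack default
    elif key in user:
      merged[key] = user[key]          # no longer shipped by the stack, but still user-defined
    # keys only present in the previous defaults are dropped
  return merged

previous_default = {"realm": "EXAMPLE.COM", "keytab_dir": "/etc/security"}
new_default      = {"realm": "EXAMPLE.COM", "keytab_dir": "/etc/security/keytabs"}
user             = {"realm": "CORP.EXAMPLE.COM", "keytab_dir": "/etc/security"}

merged = merge_descriptor(previous_default, new_default, user)
# merged["realm"]      -> "CORP.EXAMPLE.COM"      (user edit wins)
# merged["keytab_dir"] -> "/etc/security/keytabs" (new stack default adopted)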

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index 3ecf64d..1695bd3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -99,7 +99,13 @@ public class UpgradeContext {
   public static final String COMMAND_PARAM_TASKS = "tasks";
   public static final String COMMAND_PARAM_STRUCT_OUT = "structured_out";
 
-  /**
+  @Deprecated
+  @Experimental(
+      feature = ExperimentalFeature.PATCH_UPGRADES,
+      comment = "This isn't needed anymore, but many python classes still use it")
+  public static final String COMMAND_PARAM_DOWNGRADE_FROM_VERSION = "downgrade_from_version";
+
+  /*
    * The cluster that the upgrade is for.
    */
   final private Cluster m_cluster;
@@ -744,6 +750,7 @@ public class UpgradeContext {
    * <ul>
    * <li>{@link #COMMAND_PARAM_CLUSTER_NAME}
    * <li>{@link #COMMAND_PARAM_DIRECTION}
+   * <li>{@link #COMMAND_PARAM_DOWNGRADE_FROM_VERSION}
    * <li>{@link #COMMAND_PARAM_UPGRADE_TYPE}
    * <li>{@link KeyNames#REFRESH_CONFIG_TAGS_BEFORE_EXECUTION} - necessary in
    * order to have the commands contain the correct configurations. Otherwise,
@@ -758,8 +765,13 @@ public class UpgradeContext {
   public Map<String, String> getInitializedCommandParameters() {
     Map<String, String> parameters = new HashMap<>();
 
+    Direction direction = getDirection();
     parameters.put(COMMAND_PARAM_CLUSTER_NAME, m_cluster.getClusterName());
-    parameters.put(COMMAND_PARAM_DIRECTION, getDirection().name().toLowerCase());
+    parameters.put(COMMAND_PARAM_DIRECTION, direction.name().toLowerCase());
+
+    if (direction == Direction.DOWNGRADE) {
+      parameters.put(COMMAND_PARAM_DOWNGRADE_FROM_VERSION, m_repositoryVersion.getVersion());
+    }
 
     if (null != getType()) {
       // use the serialized attributes of the enum to convert it to a string,
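
One practical effect of the UpgradeContext change above is that downgrade commands now carry downgrade_from_version in commandParams instead of the old original_stack/target_stack pair. On the agent side a params script would typically read these values with the resource_management default() helper; the sketch below only shows that read path (the key names come from the diff, the surrounding boilerplate is generic).

# Minimal sketch: reading the command parameters that
# UpgradeContext.getInitializedCommandParameters() now populates.
from resource_management.core.logger import Logger
from resource_management.libraries.functions.default import default
from resource_management.libraries.script.script import Script

config = Script.get_config()

upgrade_direction = default("/commandParams/upgrade_direction", None)
# only present when the direction is DOWNGRADE (see the Java change above)
downgrade_from_version = default("/commandParams/downgrade_from_version", None)

if upgrade_direction == "downgrade" and downgrade_from_version is not None:
  # the version being moved away from, e.g. "2.5.9.9-9999"
  Logger.info("Downgrading away from {0}".format(downgrade_from_version))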

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
index 74fd76a..93b4944 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
@@ -23,6 +23,7 @@ import status_params
 
 from setup_spark import *
 from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions.constants import StackFeature
 from resource_management.libraries.functions import conf_select, stack_select
 from resource_management.libraries.functions.get_stack_version import get_stack_version
@@ -56,10 +57,8 @@ upgrade_direction = default("/commandParams/upgrade_direction", None)
 java_home = config['hostLevelParams']['java_home']
 stack_name = status_params.stack_name
 stack_root = Script.get_stack_root()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-if upgrade_direction == Direction.DOWNGRADE:
-  stack_version_unformatted = config['commandParams']['original_stack'].split("-")[1]
-stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+version_for_stack_feature_checks = get_stack_feature_version(config)
 
 sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()
 
@@ -70,7 +69,7 @@ spark_conf = '/etc/spark/conf'
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 
-if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+if check_stack_feature(StackFeature.ROLLING_UPGRADE, version_for_stack_feature_checks):
   hadoop_home = stack_select.get_hadoop_dir("home")
   spark_conf = format("{stack_root}/current/{component_directory}/conf")
   spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
@@ -211,7 +210,7 @@ dfs_type = default("/commandParams/dfs_type", "")
 # livy is only supported from HDP 2.5
 has_livyserver = False
 
-if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted) and "livy-env" in config['configurations']:
+if check_stack_feature(StackFeature.SPARK_LIVY, version_for_stack_feature_checks) and "livy-env" in config['configurations']:
   livy_component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "LIVY_SERVER")
   livy_conf = format("{stack_root}/current/{livy_component_directory}/conf")
   livy_log_dir = config['configurations']['livy-env']['livy_log_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
index 50c1555..53c8f9e 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
@@ -118,11 +118,11 @@ def setup_spark(env, type, upgrade_type=None, action=None, config_dir=None):
       mode=0644
     )
 
-  effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
+  effective_version = params.version if upgrade_type is not None else params.version_for_stack_feature_checks
   if effective_version:
     effective_version = format_stack_version(effective_version)
 
-  if effective_version and check_stack_feature(StackFeature.SPARK_JAVA_OPTS_SUPPORT, effective_version):
+  if check_stack_feature(StackFeature.SPARK_JAVA_OPTS_SUPPORT, effective_version):
     File(os.path.join(params.spark_conf, 'java-opts'),
       owner=params.spark_user,
       group=params.spark_group,
@@ -134,7 +134,7 @@ def setup_spark(env, type, upgrade_type=None, action=None, config_dir=None):
       action="delete"
     )
 
-  if params.spark_thrift_fairscheduler_content and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
+  if params.spark_thrift_fairscheduler_content and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
     # create spark-thrift-fairscheduler.xml
     File(os.path.join(config_dir,"spark-thrift-fairscheduler.xml"),
       owner=params.spark_user,

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
index 31a296a..2838186 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
@@ -34,11 +34,11 @@ def spark_service(name, upgrade_type=None, action=None):
 
   if action == 'start':
 
-    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
+    effective_version = params.version if upgrade_type is not None else params.version_for_stack_feature_checks
     if effective_version:
       effective_version = format_stack_version(effective_version)
 
-    if name == 'jobhistoryserver' and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
+    if name == 'jobhistoryserver' and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
       # copy spark-hdp-assembly.jar to hdfs
       copy_to_hdfs("spark", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
       # create spark history directory
@@ -58,7 +58,7 @@ def spark_service(name, upgrade_type=None, action=None):
 
     # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
     # need to copy the tarball, otherwise, copy it.
-    if params.stack_version_formatted and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.stack_version_formatted):
+    if check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.version_for_stack_feature_checks):
       resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
       if resource_created:
         params.HdfsResource(None, action="execute")
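
The three Spark script changes above (params.py, setup_spark.py, spark_service.py) all move from the hand-rolled stack_version_formatted / original_stack parsing to a single value computed once by get_stack_feature_version(config) and reused for every feature check, so the same code path is correct during normal operation, upgrades, and downgrades. A condensed view of the pattern, using only functions already imported in the diff:

# Condensed view of the pattern the Spark scripts switch to.
from resource_management.libraries.functions.stack_features import check_stack_feature, get_stack_feature_version
from resource_management.libraries.functions.constants import StackFeature
from resource_management.libraries.functions import stack_select
from resource_management.libraries.script.script import Script

config = Script.get_config()

# computed once; valid for upgrade, downgrade, and steady-state commands
version_for_stack_feature_checks = get_stack_feature_version(config)

if check_stack_feature(StackFeature.ROLLING_UPGRADE, version_for_stack_feature_checks):
  # versioned layout is in effect; resolve dirs under <stack-root>/current/...
  hadoop_home = stack_select.get_hadoop_dir("home")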

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
index ed92955..86f6d3b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
@@ -36,11 +36,17 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.dao.ArtifactDAO;
 import org.apache.ambari.server.orm.entities.ArtifactEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
+import org.apache.ambari.server.state.UpgradeContextFactory;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorUpdateHelper;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
 import org.junit.Before;
@@ -58,23 +64,34 @@ import org.powermock.modules.junit4.PowerMockRunner;
 public class UpgradeUserKerberosDescriptorTest {
   private Clusters clusters;
   private Cluster cluster;
+  private UpgradeEntity upgrade;
+  private UpgradeContext upgradeContext;
   private AmbariMetaInfo ambariMetaInfo;
   private KerberosDescriptorFactory kerberosDescriptorFactory;
   private ArtifactDAO artifactDAO;
+  private UpgradeContextFactory upgradeContextFactory;
 
   private TreeMap<String, Field> fields = new TreeMap<>();
+  private StackId HDP_24 = new StackId("HDP", "2.4");
 
   @Before
   public void setup() throws Exception {
     clusters = EasyMock.createMock(Clusters.class);
     cluster = EasyMock.createMock(Cluster.class);
+    upgrade = EasyMock.createNiceMock(UpgradeEntity.class);
     kerberosDescriptorFactory = EasyMock.createNiceMock(KerberosDescriptorFactory.class);
     ambariMetaInfo = EasyMock.createMock(AmbariMetaInfo.class);
     artifactDAO = EasyMock.createNiceMock(ArtifactDAO.class);
+    upgradeContextFactory = EasyMock.createNiceMock(UpgradeContextFactory.class);
+    upgradeContext = EasyMock.createNiceMock(UpgradeContext.class);
 
     expect(clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(cluster.getClusterId()).andReturn(1l).atLeastOnce();
-    replay(clusters, cluster);
+    expect(cluster.getCurrentStackVersion()).andReturn(HDP_24).atLeastOnce();
+    expect(cluster.getUpgradeInProgress()).andReturn(upgrade).atLeastOnce();
+    expect(upgradeContextFactory.create(cluster, upgrade)).andReturn(upgradeContext).atLeastOnce();
+
+    replay(clusters, cluster, upgradeContextFactory, upgrade);
 
     prepareFields();
 
@@ -82,12 +99,16 @@ public class UpgradeUserKerberosDescriptorTest {
 
   @Test
   public void testUpgrade() throws Exception {
+    StackId stackId = new StackId("HDP", "2.5");
+    RepositoryVersionEntity repositoryVersion = EasyMock.createNiceMock(RepositoryVersionEntity.class);
+    expect(repositoryVersion.getStackId()).andReturn(stackId).atLeastOnce();
+
+    expect(upgradeContext.getDirection()).andReturn(Direction.UPGRADE).atLeastOnce();
+    expect(upgradeContext.getRepositoryVersion()).andReturn(repositoryVersion).atLeastOnce();
+    replay(repositoryVersion, upgradeContext);
 
     Map<String, String> commandParams = new HashMap<>();
     commandParams.put("clusterName", "c1");
-    commandParams.put("upgrade_direction", "UPGRADE");
-    commandParams.put("original_stack", "HDP-2.4");
-    commandParams.put("target_stack", "HDP-2.5");
 
     ExecutionCommand executionCommand = new ExecutionCommand();
     executionCommand.setCommandParams(commandParams);
@@ -140,12 +161,16 @@ public class UpgradeUserKerberosDescriptorTest {
 
   @Test
   public void testDowngrade() throws Exception {
+    StackId stackId = new StackId("HDP", "2.5");
+    RepositoryVersionEntity repositoryVersion = EasyMock.createNiceMock(RepositoryVersionEntity.class);
+    expect(repositoryVersion.getStackId()).andReturn(stackId).atLeastOnce();
+
+    expect(upgradeContext.getDirection()).andReturn(Direction.DOWNGRADE).atLeastOnce();
+    expect(upgradeContext.getRepositoryVersion()).andReturn(repositoryVersion).atLeastOnce();
+    replay(repositoryVersion, upgradeContext);
 
     Map<String, String> commandParams = new HashMap<>();
     commandParams.put("clusterName", "c1");
-    commandParams.put("upgrade_direction", "DOWNGRADE");
-    commandParams.put("original_stack", "HDP-2.4");
-    commandParams.put("target_stack", "HDP-2.5");
 
     ExecutionCommand executionCommand = new ExecutionCommand();
     executionCommand.setCommandParams(commandParams);
@@ -188,12 +213,19 @@ public class UpgradeUserKerberosDescriptorTest {
   }
 
   private void prepareFields() throws NoSuchFieldException {
-    String[] fieldsNames = {"artifactDAO","clusters","ambariMetaInfo","kerberosDescriptorFactory"};
-    for(String fieldName : fieldsNames)
-    {
-      Field clustersField = UpgradeUserKerberosDescriptor.class.getDeclaredField(fieldName);
-      clustersField.setAccessible(true);
-      fields.put(fieldName, clustersField);
+    String[] fieldsNames = { "artifactDAO", "clusters", "ambariMetaInfo",
+        "kerberosDescriptorFactory", "m_upgradeContextFactory" };
+
+    for (String fieldName : fieldsNames) {
+      try {
+        Field clustersField = UpgradeUserKerberosDescriptor.class.getDeclaredField(fieldName);
+        clustersField.setAccessible(true);
+        fields.put(fieldName, clustersField);
+      } catch( NoSuchFieldException noSuchFieldException ){
+        Field clustersField = UpgradeUserKerberosDescriptor.class.getSuperclass().getDeclaredField(fieldName);
+        clustersField.setAccessible(true);
+        fields.put(fieldName, clustersField);        
+      }
     }
   }
   private void injectFields(UpgradeUserKerberosDescriptor action) throws IllegalAccessException {
@@ -201,5 +233,6 @@ public class UpgradeUserKerberosDescriptorTest {
     fields.get("clusters").set(action, clusters);
     fields.get("ambariMetaInfo").set(action, ambariMetaInfo);
     fields.get("kerberosDescriptorFactory").set(action, kerberosDescriptorFactory);
+    fields.get("m_upgradeContextFactory").set(action, upgradeContextFactory);
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/TestStackFeature.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestStackFeature.py b/ambari-server/src/test/python/TestStackFeature.py
index 0116a7a..230734c 100644
--- a/ambari-server/src/test/python/TestStackFeature.py
+++ b/ambari-server/src/test/python/TestStackFeature.py
@@ -28,6 +28,32 @@ from unittest import TestCase
 Logger.initialize_logger()
 
 class TestStackFeature(TestCase):
+  """
+  EU Upgrade (HDP 2.5 to HDP 2.6)
+    - STOP
+      hostLevelParams/stack_name = HDP
+      hostLevelParams/stack_version = 2.5
+      hostLevelParams/current_version = 2.5.0.0-1237
+      commandParams/version = 2.5.0.0-1237
+    - START
+      hostLevelParams/stack_name = HDP
+      hostLevelParams/stack_version = 2.6
+      hostLevelParams/current_version = 2.5.0.0-1237
+      commandParams/version = 2.6.0.0-334
+
+  EU Downgrade (HDP 2.6 to HDP 2.5)
+    - STOP
+    hostLevelParams/stack_name = HDP
+    hostLevelParams/stack_version = 2.6
+    hostLevelParams/current_version = 2.5.0.0-1237
+    commandParams/version = 2.6.0.0-334
+    - START
+    hostLevelParams/stack_name = HDP
+    hostLevelParams/stack_version = 2.5
+    hostLevelParams/current_version = 2.5.0.0-1237
+    commandParams/version = 2.5.0.0-1237
+  """
+
   def test_get_stack_feature_version_missing_params(self):
     try:
       stack_feature_version = get_stack_feature_version({})
@@ -122,7 +148,7 @@ class TestStackFeature(TestCase):
         "current_version":  "2.4.0.0-1234"
       },
       "commandParams": {
-        "original_stack": "2.4",
+        "source_stack": "2.4",
         "target_stack": "2.5",
         "upgrade_direction": "upgrade",
         "version": "2.5.9.9-9999"
@@ -143,8 +169,8 @@ class TestStackFeature(TestCase):
         "current_version":"2.4.0.0-1234"
       },
       "commandParams":{
-        "original_stack":"2.4",
-        "target_stack":"2.5",
+        "source_stack":"2.5",
+        "target_stack":"2.4",
         "upgrade_direction":"downgrade",
         "version":"2.4.0.0-1234",
         "downgrade_from_version": "2.5.9.9-9999"
@@ -166,10 +192,10 @@ class TestStackFeature(TestCase):
         "current_version":"2.4.0.0-1234"
       },
       "commandParams":{
-        "original_stack":"2.4",
-        "target_stack":"2.5",
+        "source_stack":"2.5",
+        "target_stack":"2.4",
         "upgrade_direction":"downgrade",
-        "version":"2.4.0.0-1234",
+        "version":"2.5.9.9-9999",
         "downgrade_from_version":"2.5.9.9-9999"
       }
     }
@@ -189,10 +215,10 @@ class TestStackFeature(TestCase):
         "custom_command":"STOP"
       },
       "commandParams":{
-        "original_stack":"2.4",
-        "target_stack":"2.5",
+        "source_stack":"2.5",
+        "target_stack":"2.4",
         "upgrade_direction":"downgrade",
-        "version":"2.4.0.0-1234",
+        "version":"2.5.9.9-9999",
         "downgrade_from_version":"2.5.9.9-9999"
       }
     }
\ No newline at end of file
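
The docstring added to TestStackFeature above pins down which hostLevelParams/commandParams combinations the tests feed in for each express-upgrade phase. Purely as a usage sketch, a case mirroring the "EU Upgrade - STOP" values from that docstring could be built as below; what get_stack_feature_version() returns for it is decided by the selection rules in the library, which the assertions in the real tests (not shown here) pin down.

# Hypothetical test input modeled on the "EU Upgrade (HDP 2.5 to HDP 2.6) - STOP"
# case documented above; which extra keys (e.g. custom_command) the helper
# inspects is not shown here, so treat this as an approximation.
from resource_management.libraries.functions.stack_features import get_stack_feature_version

command_json = {
  "hostLevelParams": {
    "stack_name": "HDP",
    "stack_version": "2.5",
    "current_version": "2.5.0.0-1237"
  },
  "commandParams": {
    "source_stack": "2.5",
    "target_stack": "2.6",
    "upgrade_direction": "upgrade",
    "version": "2.5.0.0-1237"
  }
}

stack_feature_version = get_stack_feature_version(command_json)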

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
index 7f77d83..3aadf2c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
@@ -25,7 +25,7 @@
         "upgrade_type": "nonrolling_upgrade",
         "version": "2.3.2.0-2844", 
         "forceRefreshConfigTagsBeforeExecution": "*", 
-        "original_stack": "HDP-2.2", 
+        "source_stack": "HDP-2.2",
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 
         "desired_namenode_role": "standby", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
index 87b18af..2d48ff6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
@@ -25,7 +25,7 @@
         "upgrade_type": "nonrolling_upgrade",
         "version": "2.3.2.0-2844", 
         "forceRefreshConfigTagsBeforeExecution": "*", 
-        "original_stack": "HDP-2.2", 
+        "source_stack": "HDP-2.2",
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 
         "desired_namenode_role": "standby", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
index 99fcba0..021695b 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
@@ -13,7 +13,7 @@
         "upgrade_type": "nonrolling_upgrade",
         "version": "2.3.2.0-2950", 
         "forceRefreshConfigTagsBeforeExecution": "*", 
-        "original_stack": "HDP-2.3", 
+        "source_stack": "HDP-2.3",
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 
         "script_type": "PYTHON"

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
index a9db11c..1805c3b 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
@@ -22,7 +22,7 @@
         "upgrade_type": "rolling_upgrade",
         "command_retry_max_attempt_count": "3", 
         "version": "2.3.0.0-2096", 
-        "original_stack": "HDP-2.3", 
+        "source_stack": "HDP-2.3",
         "command_retry_enabled": "false", 
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 


[25/36] ambari git commit: AMBARI-21441. Discrepancy in the OS name in ambari for PPC (aonishuk)

Posted by lp...@apache.org.
AMBARI-21441. Discrepancy in the OS name in ambari for PPC (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/51e62ad5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/51e62ad5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/51e62ad5

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 51e62ad578ab1f21a163c08c2bff6dec3fb24f7c
Parents: 639f452
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Jul 11 13:24:19 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Jul 11 13:24:19 2017 +0300

----------------------------------------------------------------------
 .../src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml       | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/51e62ad5/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
index 23441f5..ff132aa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
@@ -31,15 +31,15 @@
       <unique>false</unique>
     </repo>
   </os>
-  <os family="redhat-ppc6">
+  <os family="redhat-ppc7">
     <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.6.0.3</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.6.0.3</baseurl>
       <repoid>HDP-2.6</repoid>
       <reponame>HDP</reponame>
       <unique>true</unique>
     </repo>
     <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ppc64le</baseurl>
       <repoid>HDP-UTILS-1.1.0.21</repoid>
       <reponame>HDP-UTILS</reponame>
       <unique>false</unique>


[20/36] ambari git commit: AMBARI-21427. Assigning hosts concurrently to same config group may fail with 'org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist'. (stoader)

Posted by lp...@apache.org.
AMBARI-21427. Assigning hosts concurrently to same config group may fail with 'org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist'. (stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3c9f125c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3c9f125c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3c9f125c

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 3c9f125cc08269558f35a971c321777d331de1ca
Parents: 7f3d3b2
Author: Toader, Sebastian <st...@hortonworks.com>
Authored: Mon Jul 10 13:02:20 2017 +0200
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Mon Jul 10 13:02:45 2017 +0200

----------------------------------------------------------------------
 .../ambari/server/topology/AmbariContext.java   | 28 +++++++++++++++++---
 1 file changed, 24 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3c9f125c/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 9b64edc..dee0e6c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -30,6 +30,7 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
 
 import javax.annotation.Nullable;
 import javax.inject.Inject;
@@ -81,6 +82,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Striped;
 import com.google.inject.Provider;
 
 
@@ -121,6 +123,16 @@ public class AmbariContext {
 
   private final static Logger LOG = LoggerFactory.getLogger(AmbariContext.class);
 
+
+  /**
+   * When config groups are created using Blueprints these are created when
+   * hosts join a hostgroup and are added to the corresponding config group.
+   * Since hosts join in parallel there might be a race condition in creating
+   * the config group a host is to be added to. Thus we need to synchronize
+   * the creation of config groups with the same name.
+   */
+  private Striped<Lock> configGroupCreateLock = Striped.lazyWeakLock(1);
+
   public boolean isClusterKerberosEnabled(long clusterId) {
     Cluster cluster;
     try {
@@ -341,11 +353,17 @@ public class AmbariContext {
   }
 
   public void registerHostWithConfigGroup(final String hostName, final ClusterTopology topology, final String groupName) {
+    String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
+
+    Lock configGroupLock = configGroupCreateLock.get(qualifiedGroupName);
+
     try {
+      configGroupLock.lock();
+
       boolean hostAdded = RetryHelper.executeWithRetry(new Callable<Boolean>() {
         @Override
         public Boolean call() throws Exception {
-          return addHostToExistingConfigGroups(hostName, topology, groupName);
+          return addHostToExistingConfigGroups(hostName, topology, qualifiedGroupName);
         }
       });
       if (!hostAdded) {
@@ -355,6 +373,9 @@ public class AmbariContext {
       LOG.error("Unable to register config group for host: ", e);
       throw new RuntimeException("Unable to register config group for host: " + hostName);
     }
+    finally {
+      configGroupLock.unlock();
+    }
   }
 
   public RequestStatusResponse installHost(String hostName, String clusterName, Collection<String> skipInstallForComponents, Collection<String> dontSkipInstallForComponents, boolean skipFailure) {
@@ -562,7 +583,7 @@ public class AmbariContext {
   /**
    * Add the new host to an existing config group.
    */
-  private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String groupName) {
+  private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String configGroupName) {
     boolean addedHost = false;
     Clusters clusters;
     Cluster cluster;
@@ -576,9 +597,8 @@ public class AmbariContext {
     // I don't know of a method to get config group by name
     //todo: add a method to get config group by name
     Map<Long, ConfigGroup> configGroups = cluster.getConfigGroups();
-    String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
     for (ConfigGroup group : configGroups.values()) {
-      if (group.getName().equals(qualifiedGroupName)) {
+      if (group.getName().equals(configGroupName)) {
         try {
           Host host = clusters.getHost(hostName);
           addedHost = true;
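
The fix above serializes config-group creation per qualified group name using Guava's Striped.lazyWeakLock, so two hosts joining the same host group cannot both conclude the group is missing and race to create it. As a language-neutral illustration of the same per-key locking idea - this is a Python analog written for this digest, not Ambari code, and unlike Striped.lazyWeakLock it never releases unused locks for garbage collection - a minimal sketch might look like:

# Illustrative analog of per-key locking (what Striped<Lock> provides in the
# Java change above): callers working on the same group name share one lock,
# while different group names proceed concurrently.
import threading

_locks = {}
_locks_guard = threading.Lock()

def _lock_for(group_name):
  with _locks_guard:
    return _locks.setdefault(group_name, threading.Lock())

created_groups = set()

def register_host_with_config_group(host_name, group_name):
  with _lock_for(group_name):
    # check-then-create is now atomic per group name, so the second host to
    # arrive sees the group created by the first instead of racing it
    if group_name not in created_groups:
      created_groups.add(group_name)
    # ... add host_name to the existing group ...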


[19/36] ambari git commit: AMBARI-20950. HdfsResource can not handle S3 URL when hbase.rootdir is set to S3 URL (aonishuk)

Posted by lp...@apache.org.
AMBARI-20950. HdfsResource can not handle S3 URL when hbase.rootdir is set to S3 URL (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7f3d3b21
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7f3d3b21
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7f3d3b21

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 7f3d3b21a961581678cb7c072ec71e5eb15d7da9
Parents: d0f7a51
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Mon Jul 10 12:58:10 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Jul 10 12:58:10 2017 +0300

----------------------------------------------------------------------
 .../HBASE/0.96.0.2.0/package/scripts/hbase.py           | 12 +++++++-----
 .../HBASE/0.96.0.2.0/package/scripts/params_linux.py    |  3 +++
 2 files changed, 10 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7f3d3b21/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
index 8ad802e..cec6b2a 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
@@ -17,6 +17,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from urlparse import urlparse
 import os
 import sys
 from resource_management.libraries.script.script import Script
@@ -200,11 +201,12 @@ def hbase(name=None):
       owner=params.hbase_user
     )
   if name == "master":
-    params.HdfsResource(params.hbase_hdfs_root_dir,
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hbase_user
-    )
+    if not params.hbase_hdfs_root_dir_protocol or params.hbase_hdfs_root_dir_protocol == urlparse(params.default_fs).scheme:
+      params.HdfsResource(params.hbase_hdfs_root_dir,
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.hbase_user
+      )
     params.HdfsResource(params.hbase_staging_dir,
                          type="directory",
                          action="create_on_execute",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7f3d3b21/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index d45aea6..e05da06 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -17,6 +17,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from urlparse import urlparse
+
 import status_params
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 
@@ -237,6 +239,7 @@ else:
 hbase_env_sh_template = config['configurations']['hbase-env']['content']
 
 hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
+hbase_hdfs_root_dir_protocol = urlparse(hbase_hdfs_root_dir).scheme
 hbase_staging_dir = "/apps/hbase/staging"
 #for create_hdfs_directory
 hostname = config["hostname"]
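
The guard added to hbase.py above only lets HdfsResource create the HBase root directory when the scheme of hbase.rootdir is empty or matches the scheme of fs.defaultFS, which is what keeps HdfsResource away from S3 URLs it cannot manage. A small standalone illustration of the scheme comparison, with made-up example values:

# Standalone illustration of the scheme check introduced above.
from urlparse import urlparse

default_fs = "hdfs://nn1.example.com:8020"         # fs.defaultFS (example value)
hbase_rootdir = "s3a://my-bucket/apps/hbase/data"  # hbase.rootdir (example value)

rootdir_scheme = urlparse(hbase_rootdir).scheme    # "s3a"
defaultfs_scheme = urlparse(default_fs).scheme     # "hdfs"

if not rootdir_scheme or rootdir_scheme == defaultfs_scheme:
  print "rootdir lives on the default FS; let HdfsResource create it"
else:
  print "rootdir uses %s, not %s; skip HdfsResource creation" % (rootdir_scheme, defaultfs_scheme)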


[26/36] ambari git commit: AMBARI-21432 - Allow Services To Be Stopped During an EU Between Stack Vendors (jonathanhurley)

Posted by lp...@apache.org.
AMBARI-21432 - Allow Services To Be Stopped During an EU Between Stack Vendors (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/880853a6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/880853a6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/880853a6

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 880853a665dc07c68ec5f05975e01eba7bb561ee
Parents: 51e62ad
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Sun Jul 9 18:18:22 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Jul 11 10:31:12 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/conf_select.py          | 56 +++++---------------
 .../2.0.6/HBASE/test_phoenix_queryserver.py     | 23 --------
 .../stacks/2.0.6/YARN/test_historyserver.py     | 21 +-------
 3 files changed, 15 insertions(+), 85 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/880853a6/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index 3e01cf6..4f11633 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -399,7 +399,6 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
   stack_root = Script.get_stack_root()
   stack_version = Script.get_stack_version()
   version = None
-  allow_setting_conf_select_symlink = False
 
   if not Script.in_stack_upgrade():
     # During normal operation, the HDP stack must be 2.3 or higher
@@ -413,27 +412,10 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
 
       if not os.path.islink(hadoop_conf_dir) and stack_name and version:
         version = str(version)
-        allow_setting_conf_select_symlink = True
   else:
-    # During an upgrade/downgrade, which can be a Rolling or Express Upgrade, need to calculate it based on the version
-    '''
-    Whenever upgrading to HDP 2.2, or downgrading back to 2.2, need to use /etc/hadoop/conf
-    Whenever upgrading to HDP 2.3, or downgrading back to 2.3, need to use a versioned hadoop conf dir
-
-    Type__|_Source_|_Target_|_Direction_____________|_Comment_____________________________________________________________
-    Normal|        | 2.2    |                       | Use /etc/hadoop/conf
-    Normal|        | 2.3    |                       | Use /etc/hadoop/conf, which should be a symlink to <stack-root>/current/hadoop-client/conf
-    EU    | 2.1    | 2.3    | Upgrade               | Use versioned <stack-root>/current/hadoop-client/conf
-          |        |        | No Downgrade Allowed  | Invalid
-    EU/RU | 2.2    | 2.2.*  | Any                   | Use <stack-root>/current/hadoop-client/conf
-    EU/RU | 2.2    | 2.3    | Upgrade               | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
-          |        |        | Downgrade             | Use <stack-root>/current/hadoop-client/conf
-    EU/RU | 2.3    | 2.3.*  | Any                   | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
-    '''
-
     # The "stack_version" is the desired stack, e.g., 2.2 or 2.3
     # In an RU, it is always the desired stack, and doesn't change even during the Downgrade!
-    # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is 
+    # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is
     # rm /etc/[component]/conf and then mv /etc/[component]/conf.backup /etc/[component]/conf
     if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version):
       hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")
@@ -442,13 +424,16 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
       # is the version upgrading/downgrading to.
       stack_info = stack_select._get_upgrade_stack()
 
-      if stack_info is not None:
-        stack_name = stack_info[0]
-        version = stack_info[1]
-      else:
-        raise Fail("Unable to get parameter 'version'")
-      
-      Logger.info("In the middle of a stack upgrade/downgrade for Stack {0} and destination version {1}, determining which hadoop conf dir to use.".format(stack_name, version))
+      if stack_info is None:
+        raise Fail("Unable to retrieve the upgrade/downgrade stack information from the request")
+
+      stack_name = stack_info[0]
+      version = stack_info[1]
+
+      Logger.info(
+        "An upgrade/downgrade for {0}-{1} is in progress, determining which hadoop conf dir to use.".format(
+          stack_name, version))
+
       # This is the version either upgrading or downgrading to.
       if version and check_stack_feature(StackFeature.CONFIG_VERSIONING, version):
         # Determine if <stack-selector-tool> has been run and if not, then use the current
@@ -465,21 +450,6 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
         hadoop_conf_dir = os.path.join(stack_root, version, "hadoop", "conf")
         Logger.info("Hadoop conf dir: {0}".format(hadoop_conf_dir))
 
-        allow_setting_conf_select_symlink = True
-
-  if allow_setting_conf_select_symlink:
-    # If not in the middle of an upgrade and on HDP 2.3 or higher, or if
-    # upgrading stack to version 2.3.0.0 or higher (which may be upgrade or downgrade), then consider setting the
-    # symlink for /etc/hadoop/conf.
-    # If a host does not have any HDFS or YARN components (e.g., only ZK), then it will not contain /etc/hadoop/conf
-    # Therefore, any calls to <conf-selector-tool> will fail.
-    # For that reason, if the hadoop conf directory exists, then make sure it is set.
-    if os.path.exists(hadoop_conf_dir):
-      conf_selector_name = stack_tools.get_stack_tool_name(stack_tools.CONF_SELECTOR_NAME)
-      Logger.info("The hadoop conf dir {0} exists, will call {1} on it for version {2}".format(
-              hadoop_conf_dir, conf_selector_name, version))
-      select(stack_name, "hadoop", version)
-
   Logger.info("Using hadoop conf dir: {0}".format(hadoop_conf_dir))
   return hadoop_conf_dir
 
@@ -587,7 +557,7 @@ def convert_conf_directories_to_symlinks(package, version, dirs, skip_existing_l
 
 
   # <stack-root>/current/[component] is already set to to the correct version, e.g., <stack-root>/[version]/[component]
-  
+
   select(stack_name, package, version, ignore_errors = True)
 
   # Symlink /etc/[component]/conf to /etc/[component]/conf.backup
@@ -702,4 +672,4 @@ def _get_backup_conf_directory(old_conf):
   """
   old_parent = os.path.abspath(os.path.join(old_conf, os.pardir))
   backup_dir = os.path.join(old_parent, "conf.backup")
-  return backup_dir
+  return backup_dir
\ No newline at end of file
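
After the simplification above, choosing the Hadoop conf dir during an upgrade or downgrade reduces to two feature checks: if the stack supports rolling upgrade, start from <stack-root>/current/hadoop-client/conf, and if the version being moved to supports versioned configs, prefer <stack-root>/<version>/hadoop/conf; the conf-select symlink manipulation that used to follow has been removed. A condensed sketch of that decision, with paths and feature names taken from the diff and the "has the stack selector been run yet" refinement deliberately omitted:

# Condensed sketch of the post-change selection logic in get_hadoop_conf_dir()
# while an upgrade/downgrade is in progress; error handling and the check for
# whether <stack-selector-tool> has already run are omitted.
import os
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.constants import StackFeature

def pick_hadoop_conf_dir(stack_root, stack_version, target_version):
  hadoop_conf_dir = "/etc/hadoop/conf"

  if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version):
    hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")

    if target_version and check_stack_feature(StackFeature.CONFIG_VERSIONING, target_version):
      # the version being upgraded or downgraded to uses versioned conf dirs
      hadoop_conf_dir = os.path.join(stack_root, target_version, "hadoop", "conf")

  return hadoop_conf_dir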

http://git-wip-us.apache.org/repos/asf/ambari/blob/880853a6/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
index 60022e1..1b324d4 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
@@ -76,8 +76,6 @@ class TestPhoenixQueryServer(RMFTestCase):
       call_mocks = [(0, None, None)]
     )
 
-    self.assert_call_to_get_hadoop_conf_dir()
-
     self.assertResourceCalled('Execute',
       '/usr/hdp/current/phoenix-server/bin/queryserver.py stop',
       environment = {'JAVA_HOME':'/usr/jdk64/jdk1.8.0_40',
@@ -134,8 +132,6 @@ class TestPhoenixQueryServer(RMFTestCase):
       call_mocks = [(0, None, None)]
     )
 
-    self.assert_call_to_get_hadoop_conf_dir()
-
     self.assertResourceCalled('Execute',
       '/usr/hdp/current/phoenix-server/bin/queryserver.py stop',
       environment = {'JAVA_HOME':'/usr/jdk64/jdk1.8.0_40',
@@ -217,18 +213,7 @@ class TestPhoenixQueryServer(RMFTestCase):
 
     self.assertNoMoreResources()
 
-  def assert_call_to_get_hadoop_conf_dir(self):
-    # From call to conf_select.get_hadoop_conf_dir()
-    self.assertResourceCalled("Execute", ("cp", "-R", "-p", "/etc/hadoop/conf", "/etc/hadoop/conf.backup"),
-                              not_if = "test -e /etc/hadoop/conf.backup",
-                              sudo = True)
-    self.assertResourceCalled("Directory", "/etc/hadoop/conf",
-                              action = ["delete"])
-    self.assertResourceCalled("Link", "/etc/hadoop/conf", to="/etc/hadoop/conf.backup")
-
   def assert_configure_default(self):
-    self.assert_call_to_get_hadoop_conf_dir()
-
     self.assertResourceCalled('Directory', '/etc/hbase',
       mode = 0755
     )
@@ -330,8 +315,6 @@ class TestPhoenixQueryServer(RMFTestCase):
     )
 
   def assert_configure_secured(self):
-    self.assert_call_to_get_hadoop_conf_dir()
-
     self.assertResourceCalled('Directory', '/etc/hbase',
       mode = 0755
     )
@@ -459,10 +442,4 @@ class TestPhoenixQueryServer(RMFTestCase):
         cd_access = 'a',
     )
     self.assertResourceCalledIgnoreEarlier('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'phoenix-server', '2.3.0.0-1234'), sudo=True)
-
-    self.assertResourceCalled("Execute", ("cp", "-R", "-p", "/etc/hadoop/conf", "/etc/hadoop/conf.backup"),
-                              not_if = "test -e /etc/hadoop/conf.backup",
-                              sudo = True)
-    self.assertResourceCalled("Directory", "/etc/hadoop/conf", action = ["delete"])
-    self.assertResourceCalled("Link", "/etc/hadoop/conf", to="/etc/hadoop/conf.backup")
     self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/880853a6/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index 53d16fd..b29cfb5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -749,15 +749,6 @@ class TestHistoryServer(RMFTestCase):
                               group = 'hadoop',
                               )
 
-  def assert_call_to_get_hadoop_conf_dir(self):
-    # From call to conf_select.get_hadoop_conf_dir()
-    self.assertResourceCalled("Execute", ("cp", "-R", "-p", "/etc/hadoop/conf", "/etc/hadoop/conf.backup"),
-                              not_if = "test -e /etc/hadoop/conf.backup",
-                              sudo = True)
-    self.assertResourceCalled("Directory", "/etc/hadoop/conf",
-                              action = ["delete"])
-    self.assertResourceCalled("Link", "/etc/hadoop/conf", to="/etc/hadoop/conf.backup")
-
   @patch.object(functions, "get_stack_version", new = MagicMock(return_value="2.3.0.0-1234"))
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
   def test_pre_upgrade_restart_23(self, copy_to_hdfs_mock):
@@ -783,8 +774,6 @@ class TestHistoryServer(RMFTestCase):
     self.assertTrue(call("slider", "hadoop", "hdfs", skip=False) in copy_to_hdfs_mock.call_args_list)
 
     # From call to conf_select.get_hadoop_conf_dir()
-    self.assert_call_to_get_hadoop_conf_dir()
-    self.assert_call_to_get_hadoop_conf_dir()
 
     self.assertResourceCalled('HdfsResource', None,
         immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
@@ -800,11 +789,5 @@ class TestHistoryServer(RMFTestCase):
 
     self.assertNoMoreResources()
 
-    self.assertEquals(5, mocks_dict['call'].call_count)
-    self.assertEquals(5, mocks_dict['checked_call'].call_count)
-    self.assertEquals(
-      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
-       mocks_dict['checked_call'].call_args_list[0][0][0])
-    self.assertEquals(
-      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
-       mocks_dict['call'].call_args_list[0][0][0])
+    self.assertEquals(1, mocks_dict['call'].call_count)
+    self.assertEquals(1, mocks_dict['checked_call'].call_count)