Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/08 00:43:25 UTC

[01/10] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-21348 b6c5d78ac -> 6a3bfd5d0


http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index 7dd3990..b19920f 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -102,8 +102,12 @@ class RMFTestCase(TestCase):
     else:
       raise RuntimeError("Please specify either config_file_path or config_dict parameter")
 
-    self.config_dict["configurations"]["cluster-env"]["stack_tools"] = RMFTestCase.get_stack_tools()
-    self.config_dict["configurations"]["cluster-env"]["stack_features"] = RMFTestCase.get_stack_features()
+    # add the stack tools & features from the stack if the test case's JSON file didn't have them
+    if "stack_tools" not in self.config_dict["configurations"]["cluster-env"]:
+      self.config_dict["configurations"]["cluster-env"]["stack_tools"] = RMFTestCase.get_stack_tools()
+
+    if "stack_features" not in self.config_dict["configurations"]["cluster-env"]:
+      self.config_dict["configurations"]["cluster-env"]["stack_features"] = RMFTestCase.get_stack_features()
 
     if config_overrides:
       for key, value in config_overrides.iteritems():

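The effect of the guard above: a test case's JSON config can now pin its own stack_tools/stack_features payload and RMFTestCase will leave it alone, falling back to the stack defaults only when the keys are absent. A minimal sketch, with an illustrative (hypothetical) payload in the new per-stack-name shape:

    # sketch only: the test JSON supplies its own stack_tools, so the
    # fallback below must not clobber it (payload shape is illustrative)
    config_dict = {
        "configurations": {
            "cluster-env": {
                "stack_tools": '{"HDP": {"stack_selector": '
                               '["hdp-select", "/usr/bin/hdp-select", "hdp-select"]}}'
            }
        }
    }

    cluster_env = config_dict["configurations"]["cluster-env"]
    if "stack_tools" not in cluster_env:
        cluster_env["stack_tools"] = RMFTestCase.get_stack_tools()        # stack default
    if "stack_features" not in cluster_env:
        cluster_env["stack_features"] = RMFTestCase.get_stack_features()  # stack default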

[10/10] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by jo...@apache.org.
AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6a3bfd5d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6a3bfd5d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6a3bfd5d

Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 6a3bfd5d03b5c6752f9b84c7c6ad530372622090
Parents: b6c5d78
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Jul 7 14:36:05 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Jul 7 20:40:56 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_features.py       |   13 +
 .../libraries/functions/stack_tools.py          |   39 +
 .../libraries/script/script.py                  |   19 +-
 .../actionmanager/ExecutionCommandWrapper.java  |   20 +-
 .../controller/ActionExecutionContext.java      |   26 +
 .../controller/AmbariActionExecutionHelper.java |   21 +-
 .../BlueprintConfigurationProcessor.java        |  232 +--
 .../ClusterStackVersionResourceProvider.java    |  180 ++-
 .../internal/UpgradeResourceProvider.java       |   50 +-
 .../ambari/server/state/ConfigHelper.java       |   47 +-
 .../ambari/server/topology/AmbariContext.java   |   36 +-
 .../server/upgrade/UpgradeCatalog252.java       |   63 +-
 .../package/alerts/alert_hive_metastore.py      |   11 +-
 .../package/alerts/alert_llap_app_status.py     |   12 +-
 .../package/alerts/alert_check_oozie_server.py  |    8 +-
 .../resources/host_scripts/alert_disk_space.py  |   10 +-
 .../host_scripts/alert_version_select.py        |   16 +-
 .../4.0/configuration/cluster-env.xml           |   19 +-
 .../4.0/properties/stack_features.json          |  422 +++---
 .../BigInsights/4.0/properties/stack_tools.json |   14 +-
 .../HDP/2.0.6/configuration/cluster-env.xml     |   16 +-
 .../HDP/2.0.6/properties/stack_features.json    |  852 +++++------
 .../HDP/2.0.6/properties/stack_tools.json       |   16 +-
 .../PERF/1.0/configuration/cluster-env.xml      |   16 +-
 .../PERF/1.0/properties/stack_features.json     |   38 +-
 .../stacks/PERF/1.0/properties/stack_tools.json |   16 +-
 .../BlueprintConfigurationProcessorTest.java    |   38 +-
 ...ClusterStackVersionResourceProviderTest.java |  276 ++--
 .../ClusterConfigurationRequestTest.java        |  113 +-
 .../common-services/configs/hawq_default.json   |    6 +-
 .../python/host_scripts/TestAlertDiskSpace.py   |   16 +-
 .../2.5/configs/ranger-admin-default.json       |  990 ++++++-------
 .../2.5/configs/ranger-admin-secured.json       | 1108 +++++++--------
 .../stacks/2.5/configs/ranger-kms-default.json  | 1158 +++++++--------
 .../stacks/2.5/configs/ranger-kms-secured.json  | 1320 +++++++++---------
 .../2.6/configs/ranger-admin-default.json       |  953 +++++++------
 .../2.6/configs/ranger-admin-secured.json       | 1066 +++++++-------
 .../src/test/python/stacks/utils/RMFTestCase.py |    8 +-
 38 files changed, 4923 insertions(+), 4341 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index 2b3df5f..7811e26 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -42,6 +42,12 @@ def check_stack_feature(stack_feature, stack_version):
 
   from resource_management.libraries.functions.default import default
   from resource_management.libraries.functions.version import compare_versions
+
+  stack_name = default("/hostLevelParams/stack_name", None)
+  if stack_name is None:
+    Logger.warning("Cannot find the stack name in the command. Stack features cannot be loaded")
+    return False
+
   stack_features_config = default("/configurations/cluster-env/stack_features", None)
 
   if not stack_version:
@@ -50,6 +56,13 @@ def check_stack_feature(stack_feature, stack_version):
 
   if stack_features_config:
     data = json.loads(stack_features_config)
+
+    if stack_name not in data:
+      Logger.warning("Cannot find stack features for the stack named {0}".format(stack_name))
+      return False
+
+    data = data[stack_name]
+
     for feature in data["stack_features"]:
       if feature["name"] == stack_feature:
         if "min_version" in feature:

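The stack_features payload in cluster-env is now an object keyed by stack name rather than a bare {"stack_features": [...]} object, which is why the lookup above resolves /hostLevelParams/stack_name first and bails out with a warning when the key is missing. A minimal sketch of the new shape (feature entries illustrative):

    import json

    # new shape: the top-level key is the stack name; the old shape was the
    # bare {"stack_features": [...]} object (entries here are illustrative)
    stack_features_config = json.dumps({
        "HDP": {
            "stack_features": [
                {"name": "rolling_upgrade", "min_version": "2.2.0.0"}
            ]
        }
    })

    data = json.loads(stack_features_config)
    stack_name = "HDP"  # in the real code: default("/hostLevelParams/stack_name", None)
    if stack_name in data:
        for feature in data[stack_name]["stack_features"]:
            print(feature["name"], feature.get("min_version"))
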
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
index 02ae62d..420ae11 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
@@ -39,15 +39,33 @@ def get_stack_tool(name):
   :return: tool_name, tool_path, tool_package
   """
   from resource_management.libraries.functions.default import default
+
+  stack_name = default("/hostLevelParams/stack_name", None)
+  if stack_name is None:
+    Logger.warning("Cannot find the stack name in the command. Stack tools cannot be loaded")
+    return (None, None, None)
+
   stack_tools = None
   stack_tools_config = default("/configurations/cluster-env/stack_tools", None)
   if stack_tools_config:
     stack_tools = json.loads(stack_tools_config)
 
+  if stack_tools is None:
+    Logger.warning("The stack tools could not be found in cluster-env")
+    return (None, None, None)
+
+  if stack_name not in stack_tools:
+    Logger.warning("Cannot find stack tools for the stack named {0}".format(stack_name))
+    return (None, None, None)
+
+  # load the stack tools keyed by the stack name
+  stack_tools = stack_tools[stack_name]
+
   if not stack_tools or not name or name.lower() not in stack_tools:
     Logger.warning("Cannot find config for {0} stack tool in {1}".format(str(name), str(stack_tools)))
     return (None, None, None)
 
+
   tool_config = stack_tools[name.lower()]
 
   # Return fixed length (tool_name, tool_path, tool_package) tuple
@@ -81,3 +99,24 @@ def get_stack_tool_package(name):
   """
   (tool_name, tool_path, tool_package) = get_stack_tool(name)
   return tool_package
+
+
+def get_stack_root(stack_name, stack_root_json):
+  """
+  Get the stack-specific install root directory from the raw, JSON-escaped properties.
+  :param stack_name: the name of the stack, used as the key into the stack_root JSON
+  :param stack_root_json: the raw, JSON-escaped value of cluster-env/stack_root
+  :return: stack_root
+  """
+  from resource_management.libraries.functions.default import default
+
+  if stack_root_json is None:
+    return "/usr/{0}".format(stack_name.lower())
+
+  stack_root = json.loads(stack_root_json)
+
+  if stack_name not in stack_root:
+    Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
+    return "/usr/{0}".format(stack_name.lower())
+
+  return stack_root[stack_name]

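get_stack_root() follows the same convention: the stack_root property in cluster-env is now a JSON-escaped map of stack name to install root, with /usr/<stack_name> as the fallback. A usage sketch of the function added above (root paths illustrative):

    # stack_root is now a JSON map keyed by stack name (paths illustrative)
    stack_root_json = '{"HDP": "/usr/hdp", "BigInsights": "/usr/iop"}'

    get_stack_root("HDP", stack_root_json)   # -> "/usr/hdp"
    get_stack_root("HDP", None)              # -> "/usr/hdp" via the /usr/<name> fallback
    get_stack_root("PERF", stack_root_json)  # -> "/usr/perf", after logging a warning
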
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 04928de..0df6900 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -553,7 +553,11 @@ class Script(object):
     :return: a stack name or None
     """
     from resource_management.libraries.functions.default import default
-    return default("/hostLevelParams/stack_name", "HDP")
+    stack_name = default("/hostLevelParams/stack_name", None)
+    if stack_name is None:
+      stack_name = default("/configurations/cluster-env/stack_name", "HDP")
+
+    return stack_name
 
   @staticmethod
   def get_stack_root():
@@ -563,7 +567,18 @@ class Script(object):
     """
     from resource_management.libraries.functions.default import default
     stack_name = Script.get_stack_name()
-    return default("/configurations/cluster-env/stack_root", "/usr/{0}".format(stack_name.lower()))
+    stack_root_json = default("/configurations/cluster-env/stack_root", None)
+
+    if stack_root_json is None:
+      return "/usr/{0}".format(stack_name.lower())
+
+    stack_root = json.loads(stack_root_json)
+
+    if stack_name not in stack_root:
+      Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
+      return "/usr/{0}".format(stack_name.lower())
+
+    return stack_root[stack_name]
 
   @staticmethod
   def get_stack_version():

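Restated outside the library, Script.get_stack_name() now resolves the stack name in three steps: the command's /hostLevelParams/stack_name, then /configurations/cluster-env/stack_name, then the hard-coded "HDP" default. A standalone sketch of that order, not the library code itself:

    def resolve_stack_name(command):
        """Sketch of the new lookup order in Script.get_stack_name()."""
        host_level_params = command.get("hostLevelParams", {})
        if "stack_name" in host_level_params:
            return host_level_params["stack_name"]
        cluster_env = command.get("configurations", {}).get("cluster-env", {})
        return cluster_env.get("stack_name", "HDP")  # final hard-coded default

    command = {"configurations": {"cluster-env": {"stack_name": "HDP"}}}
    assert resolve_stack_name(command) == "HDP"
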
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
index fc66f53..28946e7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
@@ -204,19 +204,25 @@ public class ExecutionCommandWrapper {
             effectiveClusterVersion.getRepositoryVersion().getVersion());
       }
 
-      // add the stack and common-services folders to the command
+      // add the stack and common-services folders to the command, but only if
+      // they don't already exist - they may have been set ahead of time with
+      // specific values
       StackId stackId = cluster.getDesiredStackVersion();
       StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(),
           stackId.getStackVersion());
 
-      commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+      if (!commandParams.containsKey(HOOKS_FOLDER)) {
+        commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+      }
 
-      String serviceName = executionCommand.getServiceName();
-      if (!StringUtils.isEmpty(serviceName)) {
-        ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
-            stackId.getStackVersion(), serviceName);
+      if (!commandParams.containsKey(SERVICE_PACKAGE_FOLDER)) {
+        String serviceName = executionCommand.getServiceName();
+        if (!StringUtils.isEmpty(serviceName)) {
+          ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
+              stackId.getStackVersion(), serviceName);
 
-        commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
+          commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
+        }
       }
     } catch (ClusterNotFoundException cnfe) {
       // it's possible that there are commands without clusters; in such cases,

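The guard pattern above is effectively put-if-absent: a command created with a specific stack in mind (for example, when distributing a new repository) keeps its pre-set folders. Restated as a Python sketch, assuming the usual hooks_folder/service_package_folder parameter keys:

    def backfill_folders(command_params, stack_info, service_info=None):
        # only fill in values the command's creator did not already pin
        command_params.setdefault("hooks_folder", stack_info["hooks_folder"])
        if service_info is not None:
            command_params.setdefault("service_package_folder",
                                      service_info["package_folder"])
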
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index 3681eda..c361094 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@ -25,6 +25,7 @@ import java.util.Map;
 import org.apache.ambari.server.actionmanager.TargetHostType;
 import org.apache.ambari.server.controller.internal.RequestOperationLevel;
 import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+import org.apache.ambari.server.state.StackId;
 
 /**
  * The context required to create tasks and stages for a custom action
@@ -41,6 +42,7 @@ public class ActionExecutionContext {
   private String expectedComponentName;
   private boolean hostsInMaintenanceModeExcluded = true;
   private boolean allowRetry = false;
+  private StackId stackId;
 
   /**
    * {@code true} if slave/client component failures should be automatically
@@ -168,6 +170,30 @@ public class ActionExecutionContext {
     this.autoSkipFailures = autoSkipFailures;
   }
 
+  /**
+   * Gets the stack to use for generating stack-associated values for a command.
+   * In some cases the cluster's stack is not the correct one to use, such as
+   * when distributing a repository.
+   *
+   * @return the stack to use when generating stack-specific content for the
+   *         command.
+   */
+  public StackId getStackId() {
+    return stackId;
+  }
+
+  /**
+   * Sets the stack to use for generating stack-associated values for a command.
+   * In some cases the cluster's stack is not the correct one to use, such as
+   * when distributing a repository.
+   *
+   * @param stackId
+   *          the stackId to use for stack-based properties on the command.
+   */
+  public void setStackId(StackId stackId) {
+    this.stackId = stackId;
+  }
+
   @Override
   public String toString() {
     return "ActionExecutionContext{" +

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index d556b60..f75fb41 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -454,10 +454,12 @@ public class AmbariActionExecutionHelper {
       for (Map.Entry<String, String> dbConnectorName : configs.getDatabaseConnectorNames().entrySet()) {
         hostLevelParams.put(dbConnectorName.getKey(), dbConnectorName.getValue());
       }
+
       for (Map.Entry<String, String> previousDBConnectorName : configs.getPreviousDatabaseConnectorNames().entrySet()) {
         hostLevelParams.put(previousDBConnectorName.getKey(), previousDBConnectorName.getValue());
       }
-      addRepoInfoToHostLevelParams(cluster, hostLevelParams, hostName);
+
+      addRepoInfoToHostLevelParams(cluster, actionContext, hostLevelParams, hostName);
 
       Map<String, String> roleParams = execCmd.getRoleParams();
       if (roleParams == null) {
@@ -517,7 +519,8 @@ public class AmbariActionExecutionHelper {
   *
   * */
 
-  private void addRepoInfoToHostLevelParams(Cluster cluster, Map<String, String> hostLevelParams, String hostName) throws AmbariException {
+  private void addRepoInfoToHostLevelParams(Cluster cluster, ActionExecutionContext actionContext,
+      Map<String, String> hostLevelParams, String hostName) throws AmbariException {
     if (null == cluster) {
       return;
     }
@@ -526,6 +529,7 @@ public class AmbariActionExecutionHelper {
     JsonArray repositories = new JsonArray();
     ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStateCurrent(
         cluster.getClusterName());
+
     if (clusterVersionEntity != null && clusterVersionEntity.getRepositoryVersion() != null) {
       String hostOsFamily = clusters.getHost(hostName).getOsFamily();
       for (OperatingSystemEntity operatingSystemEntity : clusterVersionEntity.getRepositoryVersion().getOperatingSystems()) {
@@ -547,8 +551,15 @@ public class AmbariActionExecutionHelper {
 
     hostLevelParams.put(REPO_INFO, rootJsonObject.toString());
 
-    StackId stackId = cluster.getCurrentStackVersion();
-    hostLevelParams.put(STACK_NAME, stackId.getStackName());
-    hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
+    // set the host level params if not already set by whoever is creating this command
+    if (!hostLevelParams.containsKey(STACK_NAME) || !hostLevelParams.containsKey(STACK_VERSION)) {
+      // see if the action context has a stack ID set to use, otherwise use the
+      // cluster's current stack ID
+      StackId stackId = actionContext.getStackId() != null ? actionContext.getStackId()
+          : cluster.getCurrentStackVersion();
+
+      hostLevelParams.put(STACK_NAME, stackId.getStackName());
+      hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
+    }
   }
 }

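The same precedence, restated: explicit host-level params win, then the action context's stack ID, then the cluster's current stack. A sketch, assuming stack_name/stack_version are the keys behind the STACK_NAME/STACK_VERSION constants above:

    def add_stack_params(host_level_params, context_stack_id, cluster_stack_id):
        if "stack_name" in host_level_params and "stack_version" in host_level_params:
            return  # whoever built the command already pinned the stack
        # stack IDs sketched as (name, version) tuples
        stack = context_stack_id if context_stack_id is not None else cluster_stack_id
        host_level_params["stack_name"] = stack[0]
        host_level_params["stack_version"] = stack[1]
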
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 061cdf7..50cea9e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -34,7 +34,10 @@ import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.apache.ambari.server.topology.AdvisedConfiguration;
 import org.apache.ambari.server.topology.Blueprint;
@@ -84,31 +87,31 @@ public class BlueprintConfigurationProcessor {
    * Single host topology updaters
    */
   protected static Map<String, Map<String, PropertyUpdater>> singleHostTopologyUpdaters =
-      new HashMap<String, Map<String, PropertyUpdater>>();
+      new HashMap<>();
 
   /**
    * Multi host topology updaters
    */
   private static Map<String, Map<String, PropertyUpdater>> multiHostTopologyUpdaters =
-      new HashMap<String, Map<String, PropertyUpdater>>();
+      new HashMap<>();
 
   /**
    * Database host topology updaters
    */
   private static Map<String, Map<String, PropertyUpdater>> dbHostTopologyUpdaters =
-      new HashMap<String, Map<String, PropertyUpdater>>();
+      new HashMap<>();
 
   /**
    * Updaters for properties which need 'm' appended
    */
   private static Map<String, Map<String, PropertyUpdater>> mPropertyUpdaters =
-      new HashMap<String, Map<String, PropertyUpdater>>();
+      new HashMap<>();
 
   /**
    * Non topology related updaters
    */
   private static Map<String, Map<String, PropertyUpdater>> nonTopologyUpdaters =
-      new HashMap<String, Map<String, PropertyUpdater>>();
+      new HashMap<>();
 
   /**
    * Updaters that preserve the original property value, functions
@@ -117,13 +120,13 @@ public class BlueprintConfigurationProcessor {
    * cluster creation
    */
   private Map<String, Map<String, PropertyUpdater>> removePropertyUpdaters =
-    new HashMap<String, Map<String, PropertyUpdater>>();
+    new HashMap<>();
 
   /**
    * Collection of all updaters
    */
   private static Collection<Map<String, Map<String, PropertyUpdater>>> allUpdaters =
-      new ArrayList<Map<String, Map<String, PropertyUpdater>>>();
+      new ArrayList<>();
 
   /**
    * Compiled regex for hostgroup token.
@@ -152,7 +155,7 @@ public class BlueprintConfigurationProcessor {
    *   expected hostname information is not found.
    */
   private static Set<String> configPropertiesWithHASupport =
-    new HashSet<String>(Arrays.asList("fs.defaultFS", "hbase.rootdir", "instance.volumes", "policymgr_external_url", "xasecure.audit.destination.hdfs.dir"));
+    new HashSet<>(Arrays.asList("fs.defaultFS", "hbase.rootdir", "instance.volumes", "policymgr_external_url", "xasecure.audit.destination.hdfs.dir"));
 
   /**
    * Statically-defined list of filters to apply on property exports.
@@ -233,8 +236,8 @@ public class BlueprintConfigurationProcessor {
       singleHostTopologyUpdaters.put("oozie-env", oozieEnvUpdaters);
       singleHostTopologyUpdaters.put("oozie-site", oozieSiteUpdaters);
     } else {
-      Map<String, PropertyUpdater> oozieEnvOriginalValueMap = new HashMap<String, PropertyUpdater>();
-      Map<String, PropertyUpdater> oozieSiteOriginalValueMap = new HashMap<String, PropertyUpdater>();
+      Map<String, PropertyUpdater> oozieEnvOriginalValueMap = new HashMap<>();
+      Map<String, PropertyUpdater> oozieSiteOriginalValueMap = new HashMap<>();
       // register updaters for Oozie properties that may point to an external DB
       oozieEnvOriginalValueMap.put("oozie_existing_mysql_host", new OriginalValuePropertyUpdater());
       oozieEnvOriginalValueMap.put("oozie_existing_oracle_host", new OriginalValuePropertyUpdater());
@@ -247,7 +250,7 @@ public class BlueprintConfigurationProcessor {
       removePropertyUpdaters.put("oozie-site", oozieSiteOriginalValueMap);
     }
 
-    Map<String, PropertyUpdater> hiveEnvOriginalValueMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> hiveEnvOriginalValueMap = new HashMap<>();
     // register updaters for Hive properties that may point to an external DB
     hiveEnvOriginalValueMap.put("hive_existing_oracle_host", new OriginalValuePropertyUpdater());
     hiveEnvOriginalValueMap.put("hive_existing_mssql_server_2_host", new OriginalValuePropertyUpdater());
@@ -283,7 +286,7 @@ public class BlueprintConfigurationProcessor {
   }
 
   public Collection<String> getRequiredHostGroups() {
-    Collection<String> requiredHostGroups = new HashSet<String>();
+    Collection<String> requiredHostGroups = new HashSet<>();
 
     for (Map<String, Map<String, PropertyUpdater>> updaterMap : createCollectionOfUpdaters()) {
       for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaterMap.entrySet()) {
@@ -322,7 +325,7 @@ public class BlueprintConfigurationProcessor {
    * @return Set of config type names that were updated by this update call
    */
   public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
-      Set<String> configTypesUpdated = new HashSet<String>();
+      Set<String> configTypesUpdated = new HashSet<>();
     Configuration clusterConfig = clusterTopology.getConfiguration();
     Map<String, HostGroupInfo> groupInfoMap = clusterTopology.getHostGroupInfo();
 
@@ -350,7 +353,7 @@ public class BlueprintConfigurationProcessor {
             final String originalValue = typeMap.get(propertyName);
             final String updatedValue =
               updater.updateForClusterCreate(propertyName, originalValue, clusterProps, clusterTopology);
-            
+
             if(updatedValue == null ) {
               continue;
             }
@@ -413,6 +416,7 @@ public class BlueprintConfigurationProcessor {
     }
 
     // Explicitly set any properties that are required but not currently provided in the stack definition.
+    setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
     setRetryConfiguration(clusterConfig, configTypesUpdated);
     setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
     addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
@@ -485,7 +489,7 @@ public class BlueprintConfigurationProcessor {
       doOozieServerHAUpdate();
     }
 
-    Collection<Configuration> allConfigs = new ArrayList<Configuration>();
+    Collection<Configuration> allConfigs = new ArrayList<>();
     allConfigs.add(clusterTopology.getConfiguration());
     for (HostGroupInfo groupInfo : clusterTopology.getHostGroupInfo().values()) {
       Configuration hgConfiguration = groupInfo.getConfiguration();
@@ -705,7 +709,7 @@ public class BlueprintConfigurationProcessor {
    */
   private Collection<Map<String, Map<String, PropertyUpdater>>> addNameNodeHAUpdaters(Collection<Map<String, Map<String, PropertyUpdater>>> updaters) {
     Collection<Map<String, Map<String, PropertyUpdater>>> highAvailabilityUpdaters =
-      new LinkedList<Map<String, Map<String, PropertyUpdater>>>();
+      new LinkedList<>();
 
     // always add the statically-defined list of updaters to the list to use
     // in processing cluster configuration
@@ -732,7 +736,7 @@ public class BlueprintConfigurationProcessor {
    */
   private Collection<Map<String, Map<String, PropertyUpdater>>> addYarnResourceManagerHAUpdaters(Collection<Map<String, Map<String, PropertyUpdater>>> updaters) {
     Collection<Map<String, Map<String, PropertyUpdater>>> highAvailabilityUpdaters =
-      new LinkedList<Map<String, Map<String, PropertyUpdater>>>();
+      new LinkedList<>();
 
     // always add the statically-defined list of updaters to the list to use
     // in processing cluster configuration
@@ -758,7 +762,7 @@ public class BlueprintConfigurationProcessor {
    */
   private Collection<Map<String, Map<String, PropertyUpdater>>> addOozieServerHAUpdaters(Collection<Map<String, Map<String, PropertyUpdater>>> updaters) {
     Collection<Map<String, Map<String, PropertyUpdater>>> highAvailabilityUpdaters =
-      new LinkedList<Map<String, Map<String, PropertyUpdater>>>();
+      new LinkedList<>();
 
     // always add the statically-defined list of updaters to the list to use
     // in processing cluster configuration
@@ -857,8 +861,8 @@ public class BlueprintConfigurationProcessor {
    * @return a Map of registered PropertyUpdaters for handling HA properties in hdfs-site
    */
   private Map<String, Map<String, PropertyUpdater>> createMapOfNameNodeHAUpdaters() {
-    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = new HashMap<String, Map<String, PropertyUpdater>>();
-    Map<String, PropertyUpdater> hdfsSiteUpdatersForAvailability = new HashMap<String, PropertyUpdater>();
+    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = new HashMap<>();
+    Map<String, PropertyUpdater> hdfsSiteUpdatersForAvailability = new HashMap<>();
     highAvailabilityUpdaters.put("hdfs-site", hdfsSiteUpdatersForAvailability);
 
     //todo: Do we need to call this for HG configurations?
@@ -888,8 +892,8 @@ public class BlueprintConfigurationProcessor {
    * @return a Map of registered PropertyUpdaters for handling HA properties in yarn-site
    */
   private Map<String, Map<String, PropertyUpdater>> createMapOfYarnResourceManagerHAUpdaters() {
-    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = new HashMap<String, Map<String, PropertyUpdater>>();
-    Map<String, PropertyUpdater> yarnSiteUpdatersForAvailability = new HashMap<String, PropertyUpdater>();
+    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = new HashMap<>();
+    Map<String, PropertyUpdater> yarnSiteUpdatersForAvailability = new HashMap<>();
     highAvailabilityUpdaters.put("yarn-site", yarnSiteUpdatersForAvailability);
 
     Map<String, String> yarnSiteConfig = clusterTopology.getConfiguration().getFullProperties().get("yarn-site");
@@ -915,8 +919,8 @@ public class BlueprintConfigurationProcessor {
    * @return a Map of registered PropertyUpdaters for handling HA properties in oozie-site
    */
   private Map<String, Map<String, PropertyUpdater>> createMapOfOozieServerHAUpdaters() {
-    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = new HashMap<String, Map<String, PropertyUpdater>>();
-    Map<String, PropertyUpdater> oozieSiteUpdatersForAvailability = new HashMap<String, PropertyUpdater>();
+    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = new HashMap<>();
+    Map<String, PropertyUpdater> oozieSiteUpdatersForAvailability = new HashMap<>();
     highAvailabilityUpdaters.put("oozie-site", oozieSiteUpdatersForAvailability);
 
     // register a multi-host property updater for this Oozie property.
@@ -1190,7 +1194,7 @@ public class BlueprintConfigurationProcessor {
                   groupInfo.getHostGroupName() + "%");
             }
           }
-          Collection<String> addedGroups = new HashSet<String>();
+          Collection<String> addedGroups = new HashSet<>();
           String[] toks = propValue.split(",");
           boolean inBrackets = propValue.startsWith("[");
 
@@ -1232,7 +1236,7 @@ public class BlueprintConfigurationProcessor {
   //todo: replace this with parseHostGroupToken which would return a hostgroup or null
   private static Collection<String> getHostStrings(String val, ClusterTopology topology) {
 
-    Collection<String> hosts = new LinkedHashSet<String>();
+    Collection<String> hosts = new LinkedHashSet<>();
     Matcher m = HOSTGROUP_PORT_REGEX.matcher(val);
     while (m.find()) {
       String groupName = m.group(1);
@@ -1264,7 +1268,7 @@ public class BlueprintConfigurationProcessor {
    *         elements in this property
    */
   private static String[] splitAndTrimStrings(String propertyName) {
-    List<String> namesWithoutWhitespace = new LinkedList<String>();
+    List<String> namesWithoutWhitespace = new LinkedList<>();
     for (String service : propertyName.split(",")) {
       namesWithoutWhitespace.add(service.trim());
     }
@@ -1496,7 +1500,7 @@ public class BlueprintConfigurationProcessor {
         Collection<String> matchingGroups = topology.getHostGroupsForComponent(component);
         int matchingGroupCount = matchingGroups.size();
         if (matchingGroupCount != 0) {
-          return new HashSet<String>(matchingGroups);
+          return new HashSet<>(matchingGroups);
         } else {
           Cardinality cardinality = topology.getBlueprint().getStack().getCardinality(component);
           // if no matching host groups are found for a component whose configuration
@@ -1908,7 +1912,7 @@ public class BlueprintConfigurationProcessor {
      * @return list of hosts that have the given components
      */
     private Collection<String> getHostStringsFromLocalhost(String origValue, ClusterTopology topology) {
-      Set<String> hostStrings = new HashSet<String>();
+      Set<String> hostStrings = new HashSet<>();
       if(origValue.contains("localhost")) {
         Matcher localhostMatcher = LOCALHOST_PORT_REGEX.matcher(origValue);
         String port = null;
@@ -1950,7 +1954,7 @@ public class BlueprintConfigurationProcessor {
     private String removePorts(Collection<String> hostStrings) {
       String port = null;
       if(!usePortForEachHost && !hostStrings.isEmpty()) {
-        Set<String> temp = new HashSet<String>();
+        Set<String> temp = new HashSet<>();
 
         // extract port
         Iterator<String> i = hostStrings.iterator();
@@ -1985,7 +1989,7 @@ public class BlueprintConfigurationProcessor {
                                                     Map<String, Map<String, String>> properties,
                                                     ClusterTopology topology) {
 
-      Collection<String> requiredHostGroups = new HashSet<String>();
+      Collection<String> requiredHostGroups = new HashSet<>();
 
       // add all host groups specified in host group tokens
       Matcher m = HOSTGROUP_PORT_REGEX.matcher(origValue);
@@ -2158,8 +2162,9 @@ public class BlueprintConfigurationProcessor {
       StringBuilder sb = new StringBuilder();
 
       Matcher m = REGEX_IN_BRACKETS.matcher(origValue);
-      if (m.matches())
+      if (m.matches()) {
         origValue = m.group("INNER");
+      }
 
       if (origValue != null) {
         sb.append("[");
@@ -2167,8 +2172,9 @@ public class BlueprintConfigurationProcessor {
         for (String value : origValue.split(",")) {
 
           m = REGEX_IN_QUOTES.matcher(value);
-          if (m.matches())
+          if (m.matches()) {
             value = m.group("INNER");
+          }
 
           if (!isFirst) {
             sb.append(",");
@@ -2202,6 +2208,7 @@ public class BlueprintConfigurationProcessor {
    */
   private static class OriginalValuePropertyUpdater implements PropertyUpdater {
 
+    @Override
     public String updateForClusterCreate(String propertyName,
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
@@ -2232,7 +2239,7 @@ public class BlueprintConfigurationProcessor {
   private static class TempletonHivePropertyUpdater implements PropertyUpdater {
 
     private Map<String, PropertyUpdater> mapOfKeysToUpdaters =
-      new HashMap<String, PropertyUpdater>();
+      new HashMap<>();
 
     TempletonHivePropertyUpdater() {
       // the only known property that requires hostname substitution is hive.metastore.uris,
@@ -2297,7 +2304,7 @@ public class BlueprintConfigurationProcessor {
         return Collections.emptySet();
       }
 
-      Collection<String> requiredGroups = new HashSet<String>();
+      Collection<String> requiredGroups = new HashSet<>();
       // split out the key/value pairs
       String[] keyValuePairs = origValue.split(",");
       for (String keyValuePair : keyValuePairs) {
@@ -2344,57 +2351,57 @@ public class BlueprintConfigurationProcessor {
     allUpdaters.add(mPropertyUpdaters);
     allUpdaters.add(nonTopologyUpdaters);
 
-    Map<String, PropertyUpdater> amsSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hdfsSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> mapredSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> coreSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hbaseSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> yarnSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hiveSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hiveSiteNonTopologyMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> oozieSiteOriginalValueMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> oozieSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> stormSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> stormSiteNonTopologyMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> accumuloSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> falconStartupPropertiesMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> kafkaBrokerMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> kafkaBrokerNonTopologyMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> atlasPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> mapredEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> mHadoopEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> shHadoopEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hbaseEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hiveEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hiveInteractiveEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hiveInteractiveSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> oozieEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> oozieEnvHeapSizeMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiWebhcatSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiHbaseSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiStormSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiCoreSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiHdfsSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiHiveSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiKafkaBrokerMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiSliderClientMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiYarnSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiOozieSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiAccumuloSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiRangerKmsSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> dbHiveSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerAdminPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerEnvPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerYarnAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerHdfsAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerHbaseAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerHiveAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerKnoxAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerKafkaAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerStormAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerAtlasAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hawqSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> zookeeperEnvMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> amsSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> hdfsSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> mapredSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> coreSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> hbaseSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> yarnSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> hiveSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> hiveSiteNonTopologyMap = new HashMap<>();
+    Map<String, PropertyUpdater> oozieSiteOriginalValueMap = new HashMap<>();
+    Map<String, PropertyUpdater> oozieSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> stormSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> stormSiteNonTopologyMap = new HashMap<>();
+    Map<String, PropertyUpdater> accumuloSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> falconStartupPropertiesMap = new HashMap<>();
+    Map<String, PropertyUpdater> kafkaBrokerMap = new HashMap<>();
+    Map<String, PropertyUpdater> kafkaBrokerNonTopologyMap = new HashMap<>();
+    Map<String, PropertyUpdater> atlasPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> mapredEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> mHadoopEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> shHadoopEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> hbaseEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> hiveEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> hiveInteractiveEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> hiveInteractiveSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> oozieEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> oozieEnvHeapSizeMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiWebhcatSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiHbaseSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiStormSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiCoreSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiHdfsSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiHiveSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiKafkaBrokerMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiSliderClientMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiYarnSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiOozieSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiAccumuloSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiRangerKmsSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> dbHiveSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerAdminPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerEnvPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerYarnAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerHdfsAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerHbaseAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerHiveAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerKnoxAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerKafkaAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerStormAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerAtlasAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> hawqSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> zookeeperEnvMap = new HashMap<>();
 
     singleHostTopologyUpdaters.put("ams-site", amsSiteMap);
     singleHostTopologyUpdaters.put("hdfs-site", hdfsSiteMap);
@@ -2533,7 +2540,7 @@ public class BlueprintConfigurationProcessor {
         String atlasHookClass = "org.apache.atlas.hive.hook.HiveHook";
         String[] hiveHooks = origValue.split(",");
 
-        List<String> hiveHooksClean = new ArrayList<String>();
+        List<String> hiveHooksClean = new ArrayList<>();
         for(String hiveHook : hiveHooks) {
           if (!StringUtils.isBlank(hiveHook.trim())) {
             hiveHooksClean.add(hiveHook.trim());
@@ -2786,7 +2793,7 @@ public class BlueprintConfigurationProcessor {
 
   private Collection<String> setupHDFSProxyUsers(Configuration configuration, Set<String> configTypesUpdated) {
     // AMBARI-5206
-    final Map<String , String> userProps = new HashMap<String , String>();
+    final Map<String , String> userProps = new HashMap<>();
 
     Collection<String> services = clusterTopology.getBlueprint().getServices();
     if (services.contains("HDFS")) {
@@ -2925,6 +2932,49 @@ public class BlueprintConfigurationProcessor {
 
 
   /**
+   * Sets the read-only properties for stack features & tools, overriding
+   * anything provided in the blueprint.
+   *
+   * @param configuration
+   *          the configuration to update with values from the stack.
+   * @param configTypesUpdated
+   *          the list of configuration types updated (cluster-env will be added
+   *          to this).
+   * @throws ConfigurationTopologyException
+   */
+  private void setStackToolsAndFeatures(Configuration configuration, Set<String> configTypesUpdated)
+      throws ConfigurationTopologyException {
+    ConfigHelper configHelper = clusterTopology.getAmbariContext().getConfigHelper();
+    Stack stack = clusterTopology.getBlueprint().getStack();
+    String stackName = stack.getName();
+    String stackVersion = stack.getVersion();
+
+    StackId stackId = new StackId(stackName, stackVersion);
+
+    Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_NAME_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY, ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
+
+    try {
+      Map<String, Map<String, String>> defaultStackProperties = configHelper.getDefaultStackProperties(stackId);
+      Map<String,String> clusterEnvDefaultProperties = defaultStackProperties.get(CLUSTER_ENV_CONFIG_TYPE_NAME);
+
+      for( String property : properties ){
+        if (clusterEnvDefaultProperties.containsKey(property)) {
+          configuration.setProperty(CLUSTER_ENV_CONFIG_TYPE_NAME, property,
+              clusterEnvDefaultProperties.get(property));
+
+          // make sure to include the configuration type as being updated
+          configTypesUpdated.add(CLUSTER_ENV_CONFIG_TYPE_NAME);
+        }
+      }
+    } catch( AmbariException ambariException ){
+      throw new ConfigurationTopologyException("Unable to retrieve the stack tools and features",
+          ambariException);
+    }
+  }
+
+  /**
    * Ensure that the specified property exists.
    * If not, set a default value.
    *
@@ -3045,7 +3095,7 @@ public class BlueprintConfigurationProcessor {
 
     @Override
     public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
-      return !(this.propertyConfigType.equals(configType) &&
+      return !(propertyConfigType.equals(configType) &&
              this.propertyName.equals(propertyName));
     }
   }
@@ -3201,7 +3251,7 @@ public class BlueprintConfigurationProcessor {
      * namenode.
      */
     private final Set<String> setOfHDFSPropertyNamesNonHA =
-      Collections.unmodifiableSet( new HashSet<String>(Arrays.asList("dfs.namenode.http-address", "dfs.namenode.https-address", "dfs.namenode.rpc-address")));
+      Collections.unmodifiableSet( new HashSet<>(Arrays.asList("dfs.namenode.http-address", "dfs.namenode.https-address", "dfs.namenode.rpc-address")));
 
 
     /**
@@ -3271,7 +3321,7 @@ public class BlueprintConfigurationProcessor {
      * Set of HAWQ Property names that are only valid in a HA scenario.
      */
     private final Set<String> setOfHawqPropertyNamesNonHA =
-            Collections.unmodifiableSet( new HashSet<String>(Arrays.asList(HAWQ_SITE_HAWQ_STANDBY_ADDRESS_HOST)));
+            Collections.unmodifiableSet( new HashSet<>(Arrays.asList(HAWQ_SITE_HAWQ_STANDBY_ADDRESS_HOST)));
 
 
     /**

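setStackToolsAndFeatures() makes four cluster-env properties effectively read-only for blueprints: stack_name, stack_root, stack_tools, and stack_features are always re-copied from the stack's own defaults (those are the values behind the ConfigHelper constants referenced above). Restated as a Python sketch under that assumption:

    READ_ONLY_PROPERTIES = ("stack_name", "stack_root", "stack_tools", "stack_features")

    def set_stack_tools_and_features(cluster_env, stack_defaults, config_types_updated):
        """Overwrite any blueprint-provided values with the stack's defaults."""
        for prop in READ_ONLY_PROPERTIES:
            if prop in stack_defaults:                    # the cluster-env defaults,
                cluster_env[prop] = stack_defaults[prop]  # not the per-type outer map
                config_types_updated.add("cluster-env")
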
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 64e0b14..9ea6083 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -76,6 +76,8 @@ import org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.RepositoryVersionState;
@@ -195,6 +197,13 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   private static HostComponentStateDAO hostComponentStateDAO;
 
   /**
+   * Used for updating the existing stack tools with those of the stack being
+   * distributed.
+   */
+  @Inject
+  private static Provider<ConfigHelper> configHelperProvider;
+
+  /**
    * We have to include such a hack here, because if we
    * make finalizeUpgradeAction field static and request injection
    * for it, there will be a circle dependency error
@@ -216,11 +225,11 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   @Override
   public Set<Resource> getResourcesAuthorized(Request request, Predicate predicate) throws
       SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    final Set<Resource> resources = new HashSet<Resource>();
+    final Set<Resource> resources = new HashSet<>();
     final Set<String> requestedIds = getRequestPropertyIds(request, predicate);
     final Set<Map<String, Object>> propertyMaps = getPropertyMaps(predicate);
 
-    List<ClusterVersionEntity> requestedEntities = new ArrayList<ClusterVersionEntity>();
+    List<ClusterVersionEntity> requestedEntities = new ArrayList<>();
     for (Map<String, Object> propertyMap: propertyMaps) {
       final String clusterName = propertyMap.get(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID).toString();
       final Long id;
@@ -244,7 +253,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     for (ClusterVersionEntity entity: requestedEntities) {
       final Resource resource = new ResourceImpl(Resource.Type.ClusterStackVersion);
 
-      final Map<String, List<String>> hostStates = new HashMap<String, List<String>>();
+      final Map<String, List<String>> hostStates = new HashMap<>();
       for (RepositoryVersionState state: RepositoryVersionState.values()) {
         hostStates.put(state.name(), new ArrayList<String>());
       }
@@ -295,12 +304,10 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
     String clName;
     final String desiredRepoVersion;
-    String stackName;
-    String stackVersion;
 
     Map<String, Object> propertyMap = iterator.next();
 
-    Set<String> requiredProperties = new HashSet<String>();
+    Set<String> requiredProperties = new HashSet<>();
     requiredProperties.add(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
     requiredProperties.add(CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID);
     requiredProperties.add(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
@@ -335,19 +342,29 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
           cluster.getClusterName(), entity.getDirection().getText(false)));
     }
 
-    final StackId stackId;
-    if (propertyMap.containsKey(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID) &&
-            propertyMap.containsKey(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID)) {
-      stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
-      stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
-      stackId = new StackId(stackName, stackVersion);
-      if (! ami.isSupportedStack(stackName, stackVersion)) {
-        throw new NoSuchParentResourceException(String.format("Stack %s is not supported",
-                stackId));
-      }
-    } else { // Using stack that is current for cluster
-      StackId currentStackVersion = cluster.getCurrentStackVersion();
-      stackId = currentStackVersion;
+    String stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
+    String stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+    if (StringUtils.isBlank(stackName) || StringUtils.isBlank(stackVersion)) {
+      String message = String.format(
+          "Both the %s and %s properties are required when distributing a new stack",
+          CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+
+      throw new SystemException(message);
+    }
+
+    final StackId stackId = new StackId(stackName, stackVersion);
+
+    if (!ami.isSupportedStack(stackName, stackVersion)) {
+      throw new NoSuchParentResourceException(String.format("Stack %s is not supported", stackId));
+    }
+
+    // bootstrap the stack tools if necessary for the stack which is being
+    // distributed
+    try {
+      bootstrapStackTools(stackId, cluster);
+    } catch (AmbariException ambariException) {
+      throw new SystemException("Unable to modify stack tools for new stack being distributed",
+          ambariException);
     }
 
     RepositoryVersionEntity repoVersionEnt = repositoryVersionDAO.findByStackAndVersion(
@@ -491,7 +508,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
     // build the list of OS repos
     List<OperatingSystemEntity> operatingSystems = repoVersionEnt.getOperatingSystems();
-    Map<String, List<RepositoryEntity>> perOsRepos = new HashMap<String, List<RepositoryEntity>>();
+    Map<String, List<RepositoryEntity>> perOsRepos = new HashMap<>();
     for (OperatingSystemEntity operatingSystem : operatingSystems) {
 
       if (operatingSystem.isAmbariManagedRepos()) {
@@ -504,7 +521,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     RequestStageContainer req = createRequest();
 
     Iterator<Host> hostIterator = hosts.iterator();
-    Map<String, String> hostLevelParams = new HashMap<String, String>();
+    Map<String, String> hostLevelParams = new HashMap<>();
     hostLevelParams.put(JDK_LOCATION, getManagementController().getJdkResourceUrl());
     String hostParamsJson = StageUtils.getGson().toJson(hostLevelParams);
 
@@ -538,7 +555,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
     boolean hasStage = false;
 
-    ArrayList<Stage> stages = new ArrayList<Stage>(batchCount);
+    ArrayList<Stage> stages = new ArrayList<>(batchCount);
     for (int batchId = 1; batchId <= batchCount; batchId++) {
       // Create next stage
       String stageName;
@@ -618,8 +635,8 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
 
     // determine packages for all services that are installed on host
-    List<ServiceOsSpecific.Package> packages = new ArrayList<ServiceOsSpecific.Package>();
-    Set<String> servicesOnHost = new HashSet<String>();
+    List<ServiceOsSpecific.Package> packages = new ArrayList<>();
+    Set<String> servicesOnHost = new HashSet<>();
     List<ServiceComponentHost> components = cluster.getServiceComponentHosts(host.getHostName());
     for (ServiceComponentHost component : components) {
       if (repoServices.isEmpty() || repoServices.contains(component.getServiceName())) {
@@ -670,7 +687,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     final String packageList = gson.toJson(packages);
     final String repoList = gson.toJson(repoInfo);
 
-    Map<String, String> params = new HashMap<String, String>();
+    Map<String, String> params = new HashMap<>();
     params.put("stack_id", stackId.getStackId());
     params.put("repository_version", repoVersion.getVersion());
     params.put("base_urls", repoList);
@@ -689,19 +706,17 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
       params.put(KeyNames.PACKAGE_VERSION, xml.getPackageVersion(osFamily));
     }
 
-
     // add host to this stage
     RequestResourceFilter filter = new RequestResourceFilter(null, null,
             Collections.singletonList(host.getHostName()));
 
-    ActionExecutionContext actionContext = new ActionExecutionContext(
-            cluster.getClusterName(), INSTALL_PACKAGES_ACTION,
-            Collections.singletonList(filter),
-            params);
+    ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
+        INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), params);
+
+    actionContext.setStackId(stackId);
     actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
 
     return actionContext;
-
   }
 
 
@@ -787,7 +802,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
       }
       Map<String, Object> propertyMap = iterator.next();
 
-      Set<String> requiredProperties = new HashSet<String>();
+      Set<String> requiredProperties = new HashSet<>();
       requiredProperties.add(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
       requiredProperties.add(CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID);
       requiredProperties.add(CLUSTER_STACK_VERSION_STATE_PROPERTY_ID);
@@ -826,7 +841,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
       }
 
       if (!force) {
-        Map<String, String> args = new HashMap<String, String>();
+        Map<String, String> args = new HashMap<>();
         if (newStateStr.equals(RepositoryVersionState.CURRENT.toString())) {
           // Finalize upgrade workflow
           args.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
@@ -841,7 +856,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
         // Get a host name to populate the hostrolecommand table's hostEntity.
         String defaultHostName;
-        ArrayList<Host> hosts = new ArrayList<Host>(cluster.getHosts());
+        ArrayList<Host> hosts = new ArrayList<>(cluster.getHosts());
         if (!hosts.isEmpty()) {
           Collections.sort(hosts);
           defaultHostName = hosts.get(0).getHostName();
@@ -976,4 +991,101 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
 
+  /**
+   * Ensures that the stack tools and stack features are set on
+   * {@link ConfigHelper#CLUSTER_ENV} for the stack of the repository being
+   * distributed. This step ensures that the new repository can be distributed
+   * with the correct tools.
+   * <p/>
+   * If the cluster's current stack name matches that of the new stack or the
+   * new stack's tools are already added in the configuration, then this method
+   * will not change anything.
+   *
+   * @param stackId
+   *          the stack of the repository being distributed (not {@code null}).
+   * @param cluster
+   *          the cluster the new stack/repo is being distributed for (not
+   *          {@code null}).
+   * @throws AmbariException
+   */
+  private void bootstrapStackTools(StackId stackId, Cluster cluster) throws AmbariException {
+    // if the stack name is the same as the cluster's current stack name, then
+    // there's no work to do
+    if (StringUtils.equals(stackId.getStackName(),
+        cluster.getCurrentStackVersion().getStackName())) {
+      return;
+    }
+
+    ConfigHelper configHelper = configHelperProvider.get();
+
+    // get the stack tools/features for the stack being distributed
+    Map<String, Map<String, String>> defaultStackConfigurationsByType = configHelper.getDefaultProperties(
+        stackId, cluster);
+
+    Map<String, String> clusterEnvDefaults = defaultStackConfigurationsByType.get(
+        ConfigHelper.CLUSTER_ENV);
+
+    Config clusterEnv = cluster.getDesiredConfigByType(ConfigHelper.CLUSTER_ENV);
+    Map<String, String> clusterEnvProperties = clusterEnv.getProperties();
+
+    // the 3 properties we need to check and update
+    Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
+
+    // any updates are stored here and merged into the existing config type
+    Map<String, String> updatedProperties = new HashMap<>();
+
+    for (String property : properties) {
+      // determine if the property exists in the stack being distributed (it
+      // kind of has to, but we'll be safe if it's not found)
+      String newStackDefaultJson = clusterEnvDefaults.get(property);
+      if (StringUtils.isBlank(newStackDefaultJson)) {
+        continue;
+      }
+
+      String existingPropertyJson = clusterEnvProperties.get(property);
+
+      // if the stack tools/features property doesn't exist, then just set the
+      // one from the new stack
+      if (StringUtils.isBlank(existingPropertyJson)) {
+        updatedProperties.put(property, newStackDefaultJson);
+        continue;
+      }
+
+      // now the harder part - check whether the new stack's tools already
+      // exist alongside the current tools and, if they don't, add the new
+      // stack's entry in
+      final Map<String, Object> existingJson;
+      final Map<String, ?> newStackJsonAsObject;
+      if (StringUtils.equals(property, ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY)) {
+        existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson, Map.class);
+        newStackJsonAsObject = gson.<Map<String, String>> fromJson(newStackDefaultJson, Map.class);
+      } else {
+        existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson,
+            Map.class);
+
+        newStackJsonAsObject = gson.<Map<String, Map<Object, Object>>> fromJson(newStackDefaultJson,
+            Map.class);
+      }
+
+      if (existingJson.keySet().contains(stackId.getStackName())) {
+        continue;
+      }
+
+      existingJson.put(stackId.getStackName(), newStackJsonAsObject.get(stackId.getStackName()));
+
+      String newJson = gson.toJson(existingJson);
+      updatedProperties.put(property, newJson);
+    }
+
+    if (!updatedProperties.isEmpty()) {
+      AmbariManagementController amc = getManagementController();
+      String serviceNote = String.format(
+          "Adding stack tools for %s while distributing a new repository", stackId.toString());
+
+      configHelper.updateConfigType(cluster, amc, clusterEnv.getType(), updatedProperties, null,
+          amc.getAuthName(), serviceNote);
+    }
+  }
 }
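
Editor's note: the bootstrapStackTools method above merges the new stack's entry into the
existing cluster-env JSON values. Below is a minimal standalone sketch of that merge, using
Gson as the patch itself does; the "ODP" stack name and the selector values are hypothetical,
not taken from a real stack definition.

import java.util.Map;

import com.google.gson.Gson;

public class StackToolsMergeSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    Gson gson = new Gson();

    // existing cluster-env value: tools for the cluster's current stack only
    String existingJson = "{\"HDP\": {\"stack_selector\": [\"hdp-select\"]}}";

    // defaults shipped with the stack being distributed (hypothetical "ODP")
    String newStackDefaults = "{\"ODP\": {\"stack_selector\": [\"odp-select\"]}}";
    String newStackName = "ODP";

    Map<String, Object> existing = gson.fromJson(existingJson, Map.class);
    Map<String, Object> incoming = gson.fromJson(newStackDefaults, Map.class);

    // copy the new stack's entry in only when it is not already present
    if (!existing.containsKey(newStackName)) {
      existing.put(newStackName, incoming.get(newStackName));
    }

    // both stacks' tools now co-exist in a single property value
    System.out.println(gson.toJson(existing));
  }
}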

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 0dacb56..1130026 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -205,6 +205,12 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    */
   protected static final String UPGRADE_HOST_ORDERED_HOSTS = "Upgrade/host_order";
 
+  /**
+   * The role that will be used when creating HRC's for the type
+   * {@link StageWrapper.Type#UPGRADE_TASKS}.
+   */
+  protected static final String EXECUTE_TASK_ROLE = "ru_execute_tasks";
+
   /*
    * Lifted from RequestResourceProvider
    */
@@ -1327,6 +1333,32 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     }
   }
 
+  /**
+   * Creates an action stage using the {@link #EXECUTE_TASK_ROLE} custom action
+   * to execute some Python command.
+   *
+   * @param context
+   *          the upgrade context.
+   * @param request
+   *          the request object to add the stage to.
+   * @param effectiveStackId
+   *          the stack ID to use when generating content for the command. For
+   *          some upgrade types this may change during the course of the
+   *          upgrade orchestration; an express upgrade, for example, changes
+   *          it after stopping all services.
+   * @param entity
+   *          the upgrade entity to set the stage information on
+   * @param wrapper
+   *          the stage wrapper containing information to generate the stage.
+   * @param skippable
+   *          {@code true} to mark the stage as being skippable if a failure
+   *          occurs.
+   * @param supportsAutoSkipOnFailure
+   *          {@code true} to automatically skip on a failure.
+   * @param allowRetry
+   *          {@code true} to be able to retry the failed stage.
+   * @throws AmbariException
+   */
   private void makeActionStage(UpgradeContext context, RequestStageContainer request,
       StackId effectiveStackId, UpgradeItemEntity entity, StageWrapper wrapper, boolean skippable,
       boolean supportsAutoSkipOnFailure, boolean allowRetry) throws AmbariException {
@@ -1356,21 +1388,22 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         && wrapper.getTasks().get(0).getService() != null) {
 
       AmbariMetaInfo ambariMetaInfo = s_metaProvider.get();
-      StackId stackId = context.getTargetStackId();
 
-      StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(),
-          stackId.getStackVersion());
+      StackInfo stackInfo = ambariMetaInfo.getStack(effectiveStackId.getStackName(),
+          effectiveStackId.getStackVersion());
 
       String serviceName = wrapper.getTasks().get(0).getService();
-      ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
-          stackId.getStackVersion(), serviceName);
+      ServiceInfo serviceInfo = ambariMetaInfo.getService(effectiveStackId.getStackName(),
+          effectiveStackId.getStackVersion(), serviceName);
 
       params.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
       params.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
     }
 
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
-        "ru_execute_tasks", Collections.singletonList(filter), params);
+        EXECUTE_TASK_ROLE, Collections.singletonList(filter), params);
+
+    actionContext.setStackId(effectiveStackId);
 
     // hosts in maintenance mode are excluded from the upgrade
     actionContext.setMaintenanceModeHostExcluded(true);
@@ -1464,6 +1497,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
 
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         function, filters, commandParams);
+
+    actionContext.setStackId(effectiveStackId);
+
     actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isComponentFailureAutoSkipped());
@@ -1523,6 +1559,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         "SERVICE_CHECK", filters, commandParams);
 
+    actionContext.setStackId(effectiveStackId);
     actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isServiceCheckFailureAutoSkipped());
@@ -1665,6 +1702,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         Role.AMBARI_SERVER_ACTION.toString(), Collections.<RequestResourceFilter> emptyList(),
         commandParams);
 
+    actionContext.setStackId(effectiveStackId);
     actionContext.setTimeout(Short.valueOf((short) -1));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isComponentFailureAutoSkipped());
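
Editor's note: the effectiveStackId threaded through makeActionStage and the other stage
builders above is what lets a single upgrade render commands against different stacks over
time. Here is a toy model of that switch, with the stop-all boundary taken from the javadoc
above; the stack IDs are illustrative only.

public class EffectiveStackSketch {
  static String effectiveStackId(boolean allServicesStopped,
      String sourceStackId, String targetStackId) {
    // per the makeActionStage javadoc, an express upgrade changes the
    // effective stack after stopping all services
    return allServicesStopped ? targetStackId : sourceStackId;
  }

  public static void main(String[] args) {
    System.out.println(effectiveStackId(false, "HDP-2.5", "HDP-2.6")); // HDP-2.5
    System.out.println(effectiveStackId(true, "HDP-2.5", "HDP-2.6"));  // HDP-2.6
  }
}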

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 13114dd..ab8026c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -31,7 +31,6 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.base.Objects;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
@@ -46,6 +45,7 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Objects;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.collect.Maps;
@@ -89,8 +89,10 @@ public class ConfigHelper {
   public static final String CLUSTER_ENV_RETRY_COMMANDS = "commands_to_retry";
   public static final String CLUSTER_ENV_RETRY_MAX_TIME_IN_SEC = "command_retry_max_time_in_sec";
   public static final String COMMAND_RETRY_MAX_TIME_IN_SEC_DEFAULT = "600";
+  public static final String CLUSTER_ENV_STACK_NAME_PROPERTY = "stack_name";
   public static final String CLUSTER_ENV_STACK_FEATURES_PROPERTY = "stack_features";
   public static final String CLUSTER_ENV_STACK_TOOLS_PROPERTY = "stack_tools";
+  public static final String CLUSTER_ENV_STACK_ROOT_PROPERTY = "stack_root";
 
   public static final String HTTP_ONLY = "HTTP_ONLY";
   public static final String HTTPS_ONLY = "HTTPS_ONLY";
@@ -652,7 +654,7 @@ public class ConfigHelper {
     }
 
     for (Service service : cluster.getServices().values()) {
-      Set<PropertyInfo> serviceProperties = new HashSet<PropertyInfo>(servicesMap.get(service.getName()).getProperties());
+      Set<PropertyInfo> serviceProperties = new HashSet<>(servicesMap.get(service.getName()).getProperties());
       for (PropertyInfo serviceProperty : serviceProperties) {
         if (serviceProperty.getPropertyTypes().contains(propertyType)) {
           String stackPropertyConfigType = fileNameToConfigType(serviceProperty.getFilename());
@@ -907,13 +909,16 @@ public class ConfigHelper {
     return properties;
   }
 
-  public Set<PropertyInfo> getStackProperties(Cluster cluster) throws AmbariException {
-    StackId stackId = cluster.getCurrentStackVersion();
+  public Set<PropertyInfo> getStackProperties(StackId stackId) throws AmbariException {
     StackInfo stack = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
-
     return ambariMetaInfo.getStackProperties(stack.getName(), stack.getVersion());
   }
 
+  public Set<PropertyInfo> getStackProperties(Cluster cluster) throws AmbariException {
+    StackId stackId = cluster.getCurrentStackVersion();
+    return getStackProperties(stackId);
+  }
+
   /**
    * A helper method to create a new {@link Config} for a given configuration
    * type and updates to the current values, if any. This method will perform the following tasks:
@@ -1128,6 +1133,38 @@ public class ConfigHelper {
   }
 
   /**
+   * Gets the default properties from the specified stack when a cluster is
+   * first installed.
+   *
+   * @param stack
+   *          the stack to pull stack-values from (not {@code null})
+   * @return a mapping of configuration type to map of key/value pairs for the
+   *         default configurations.
+   * @throws AmbariException
+   */
+  public Map<String, Map<String, String>> getDefaultStackProperties(StackId stack)
+      throws AmbariException {
+    Map<String, Map<String, String>> defaultPropertiesByType = new HashMap<>();
+
+    // populate the stack (non-service related) properties
+    Set<org.apache.ambari.server.state.PropertyInfo> stackConfigurationProperties = ambariMetaInfo.getStackProperties(
+        stack.getStackName(), stack.getStackVersion());
+
+    for (PropertyInfo stackDefaultProperty : stackConfigurationProperties) {
+      String type = ConfigHelper.fileNameToConfigType(stackDefaultProperty.getFilename());
+
+      if (!defaultPropertiesByType.containsKey(type)) {
+        defaultPropertiesByType.put(type, new HashMap<String, String>());
+      }
+
+      defaultPropertiesByType.get(type).put(stackDefaultProperty.getName(),
+          stackDefaultProperty.getValue());
+    }
+
+    return defaultPropertiesByType;
+  }
+
+  /**
    * Gets the default properties from the specified stack and services when a
    * cluster is first installed.
    *
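
Editor's note: getDefaultStackProperties above buckets each stack-level property under its
config type, derived from the property's filename. A self-contained sketch of that bucketing
follows, assuming the usual rule that the config type is the filename minus its ".xml" suffix;
the sample properties are illustrative.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DefaultStackPropertiesSketch {
  static class Prop {
    final String filename, name, value;
    Prop(String filename, String name, String value) {
      this.filename = filename;
      this.name = name;
      this.value = value;
    }
  }

  // assumed rule: config type is the filename with its ".xml" suffix removed
  static String fileNameToConfigType(String filename) {
    return filename.replaceAll("\\.xml$", "");
  }

  public static void main(String[] args) {
    List<Prop> stackProperties = Arrays.asList(
        new Prop("cluster-env.xml", "stack_root", "{\"HDP\":\"/usr/hdp\"}"),
        new Prop("cluster-env.xml", "stack_tools", "{\"HDP\":{}}"),
        new Prop("hadoop-env.xml", "hadoop_heapsize", "1024"));

    // group each property under its config type, as the new method does
    Map<String, Map<String, String>> byType = new HashMap<>();
    for (Prop p : stackProperties) {
      byType.computeIfAbsent(fileNameToConfigType(p.filename), t -> new HashMap<>())
          .put(p.name, p.value);
    }

    System.out.println(byType);
  }
}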

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 106d7c8..a2c0b9b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -69,6 +69,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityType;
@@ -79,6 +80,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
+import com.google.inject.Provider;
 
 
 /**
@@ -99,6 +101,12 @@ public class AmbariContext {
   @Inject
   ConfigFactory configFactory;
 
+  /**
+   * Used for getting configuration property values from stack and services.
+   */
+  @Inject
+  private Provider<ConfigHelper> configHelper;
+
   private static AmbariManagementController controller;
   private static ClusterController clusterController;
   //todo: task id's.  Use existing mechanism for getting next task id sequence
@@ -205,8 +213,8 @@ public class AmbariContext {
     } catch (AmbariException e) {
       throw new RuntimeException("Failed to persist service and component resources: " + e, e);
     }
-    Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
-    Set<ServiceComponentRequest> componentRequests = new HashSet<ServiceComponentRequest>();
+    Set<ServiceRequest> serviceRequests = new HashSet<>();
+    Set<ServiceComponentRequest> componentRequests = new HashSet<>();
     for (String service : services) {
       String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service);
       serviceRequests.add(new ServiceRequest(clusterName, service, null, credentialStoreEnabled));
@@ -223,13 +231,13 @@ public class AmbariContext {
     }
     // set all services state to INSTALLED->STARTED
     // this is required so the user can start failed services at the service level
-    Map<String, Object> installProps = new HashMap<String, Object>();
+    Map<String, Object> installProps = new HashMap<>();
     installProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "INSTALLED");
     installProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    Map<String, Object> startProps = new HashMap<String, Object>();
+    Map<String, Object> startProps = new HashMap<>();
     startProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "STARTED");
     startProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    Predicate predicate = new EqualsPredicate<String>(
+    Predicate predicate = new EqualsPredicate<>(
         ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
     try {
       getServiceResourceProvider().updateResources(
@@ -262,7 +270,7 @@ public class AmbariContext {
     }
     String clusterName = cluster.getClusterName();
 
-    Map<String, Object> properties = new HashMap<String, Object>();
+    Map<String, Object> properties = new HashMap<>();
     properties.put(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
     properties.put(HostResourceProvider.HOST_NAME_PROPERTY_ID, hostName);
     properties.put(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID, host.getRackInfo());
@@ -275,7 +283,7 @@ public class AmbariContext {
           hostName, e.toString()), e);
     }
 
-    final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
+    final Set<ServiceComponentHostRequest> requests = new HashSet<>();
 
     for (Map.Entry<String, Collection<String>> entry : components.entrySet()) {
       String service = entry.getKey();
@@ -589,7 +597,7 @@ public class AmbariContext {
    * and the hosts associated with the host group are assigned to the config group.
    */
   private void createConfigGroupsAndRegisterHost(ClusterTopology topology, String groupName) throws AmbariException {
-    Map<String, Map<String, Config>> groupConfigs = new HashMap<String, Map<String, Config>>();
+    Map<String, Map<String, Config>> groupConfigs = new HashMap<>();
     Stack stack = topology.getBlueprint().getStack();
 
     // get the host-group config with cluster creation template overrides
@@ -608,7 +616,7 @@ public class AmbariContext {
       //todo: attributes
       Map<String, Config> serviceConfigs = groupConfigs.get(service);
       if (serviceConfigs == null) {
-        serviceConfigs = new HashMap<String, Config>();
+        serviceConfigs = new HashMap<>();
         groupConfigs.put(service, serviceConfigs);
       }
       serviceConfigs.put(type, config);
@@ -669,6 +677,16 @@ public class AmbariContext {
     return String.format("%s:%s", bpName, hostGroupName);
   }
 
+  /**
+   * Gets an instance of {@link ConfigHelper} for classes which are not
+   * dependency injected.
+   *
+   * @return a {@link ConfigHelper} instance.
+   */
+  public ConfigHelper getConfigHelper() {
+    return configHelper.get();
+  }
+
   private synchronized HostResourceProvider getHostResourceProvider() {
     if (hostResourceProvider == null) {
       hostResourceProvider = (HostResourceProvider)
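
Editor's note: the Provider<ConfigHelper> field added above defers construction of the helper
until get() is called, which lets non-injected callers obtain it without eager wiring. A
minimal Guice sketch of the same pattern; the Helper class here is a stand-in, not an Ambari
type.

import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Provider;

public class ProviderSketch {
  static class Helper {
    String describe() {
      return "helper ready";
    }
  }

  static class Context {
    // resolved lazily: Helper is not constructed until get() is called
    @Inject
    private Provider<Helper> helper;

    String useHelper() {
      return helper.get().describe();
    }
  }

  public static void main(String[] args) {
    Injector injector = Guice.createInjector();
    Context context = injector.getInstance(Context.class);
    System.out.println(context.useHelper());
  }
}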

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index 0ab8180..e7764b8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -18,10 +18,20 @@
 package org.apache.ambari.server.upgrade;
 
 import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.commons.lang.StringUtils;
+
+import com.google.common.collect.Sets;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 
@@ -42,6 +52,8 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
   private static final String UPGRADE_ITEM_TABLE = "upgrade_item";
   private static final String UPGRADE_ID_COLUMN = "upgrade_id";
 
+  private static final String CLUSTER_ENV = "cluster-env";
+
   /**
    * Constructor.
    *
@@ -89,6 +101,7 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
    */
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
+    resetStackToolsAndFeatures();
   }
 
   /**
@@ -135,4 +148,52 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
     dbAccessor.addFKConstraint(UPGRADE_TABLE, "FK_upgrade_to_repo_id",
         UPGRADE_TABLE_FROM_REPO_COLUMN, "repo_version", "repo_version_id", false);
   }
+
+  /**
+   * Resets the following properties in {@code cluster-env} to their new
+   * defaults:
+   * <ul>
+   * <li>stack_root
+   * <li>stack_tools
+   * <li>stack_features
+   * </ul>
+   *
+   * @throws AmbariException
+   */
+  private void resetStackToolsAndFeatures() throws AmbariException {
+    Set<String> propertiesToReset = Sets.newHashSet("stack_tools", "stack_features", "stack_root");
+
+    Clusters clusters = injector.getInstance(Clusters.class);
+    ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
+
+    Map<String, Cluster> clusterMap = clusters.getClusters();
+    for (Cluster cluster : clusterMap.values()) {
+      Config clusterEnv = cluster.getDesiredConfigByType(CLUSTER_ENV);
+      if (null == clusterEnv) {
+        continue;
+      }
+
+      Map<String, String> newStackProperties = new HashMap<>();
+      Set<PropertyInfo> stackProperties = configHelper.getStackProperties(cluster);
+      if (null == stackProperties) {
+        continue;
+      }
+
+      for (PropertyInfo propertyInfo : stackProperties) {
+        String fileName = propertyInfo.getFilename();
+        if (StringUtils.isEmpty(fileName)) {
+          continue;
+        }
+
+        if (StringUtils.equals(ConfigHelper.fileNameToConfigType(fileName), CLUSTER_ENV)) {
+          String stackPropertyName = propertyInfo.getName();
+          if (propertiesToReset.contains(stackPropertyName)) {
+            newStackProperties.put(stackPropertyName, propertyInfo.getValue());
+          }
+        }
+      }
+
+      updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, newStackProperties, true, false);
+    }
+  }
 }
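
Editor's note: resetStackToolsAndFeatures pushes only the three multi-stack properties back
to cluster-env, overwriting whatever single-stack values the previous release left behind.
A standalone sketch of that filtering; the default values shown are illustrative.

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ResetStackPropertiesSketch {
  public static void main(String[] args) {
    Set<String> propertiesToReset =
        new HashSet<>(Arrays.asList("stack_tools", "stack_features", "stack_root"));

    // cluster-env defaults from the current stack (property name -> value)
    Map<String, String> clusterEnvDefaults = new HashMap<>();
    clusterEnvDefaults.put("stack_root", "{\"HDP\":\"/usr/hdp\"}");
    clusterEnvDefaults.put("stack_tools", "{\"HDP\":{}}");
    clusterEnvDefaults.put("ignore_groupsusers_create", "false");

    // keep only the three properties that must be reset to the new format
    Map<String, String> newStackProperties = new HashMap<>();
    for (Map.Entry<String, String> entry : clusterEnvDefaults.entrySet()) {
      if (propertiesToReset.contains(entry.getKey())) {
        newStackProperties.put(entry.getKey(), entry.getValue());
      }
    }

    // these would be written back to cluster-env, replacing the old values
    System.out.println(newStackProperties);
  }
}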

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
index 32df7d3..5b4fd68 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
@@ -27,6 +27,7 @@ import logging
 from resource_management.core import global_lock
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import stack_tools
 from resource_management.core.resources import Execute
 from resource_management.core.signal_utils import TerminateStrategy
 from ambari_commons.os_check import OSConst
@@ -56,6 +57,7 @@ SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
 SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
 SMOKEUSER_DEFAULT = 'ambari-qa'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 
 HIVE_CONF_DIR_LEGACY = '/etc/hive/conf.server'
@@ -78,7 +80,7 @@ def get_tokens():
   """
   return (SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,
     HIVE_METASTORE_URIS_KEY, SMOKEUSER_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
-    STACK_ROOT)
+    STACK_NAME, STACK_ROOT)
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def get_tokens():
@@ -175,9 +177,10 @@ def execute(configurations={}, parameters={}, host_name=None):
     bin_dir = HIVE_BIN_DIR_LEGACY
 
 
-    if STACK_ROOT in configurations:
-      hive_conf_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/conf")
-      hive_bin_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/bin")
+    if STACK_NAME in configurations and STACK_ROOT in configurations:
+      stack_root = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
+      hive_conf_dir = stack_root + format("/current/hive-metastore/conf")
+      hive_bin_dir = stack_root + format("/current/hive-metastore/bin")
 
       if os.path.exists(hive_conf_dir):
         conf_dir = hive_conf_dir
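
Editor's note: the alert now passes both the stack name and the raw stack_root property to
stack_tools.get_stack_root, since stack_root is stored as JSON keyed by stack name. Below is
a Gson-based sketch of the lookup that function presumably performs; the fallback to the raw
value for legacy plain-path input is an assumption, not confirmed by this patch.

import java.util.Map;

import com.google.gson.Gson;

public class GetStackRootSketch {
  static String getStackRoot(String stackName, String stackRootJson) {
    try {
      Map<?, ?> roots = new Gson().fromJson(stackRootJson, Map.class);
      Object root = (roots == null) ? null : roots.get(stackName);
      return (root == null) ? stackRootJson : root.toString();
    } catch (RuntimeException e) {
      return stackRootJson; // legacy plain-path value
    }
  }

  public static void main(String[] args) {
    String stackRoot = "{\"HDP\": \"/usr/hdp\"}";
    // prints /usr/hdp/current/hive-metastore/conf
    System.out.println(getStackRoot("HDP", stackRoot) + "/current/hive-metastore/conf");
  }
}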

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
index 98d1899..e46c896 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
@@ -26,7 +26,7 @@ import subprocess
 
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
-from ambari_commons.os_check import OSConst
+from resource_management.libraries.functions import stack_tools
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from resource_management.core import shell
 from resource_management.core.resources import Execute
@@ -58,6 +58,7 @@ HIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
 HIVE_USER_KEY = '{{hive-env/hive_user}}'
 HIVE_USER_DEFAULT = 'default.smoke.user'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 STACK_ROOT_DEFAULT = Script.get_stack_root()
 
@@ -88,7 +89,7 @@ def get_tokens():
   to build the dictionary passed into execute
   """
   return (SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_PRINCIPAL_KEY, HIVE_PRINCIPAL_KEYTAB_KEY,
-          HIVE_USER_KEY, STACK_ROOT, LLAP_APP_NAME_KEY)
+          HIVE_USER_KEY, STACK_NAME, STACK_ROOT, LLAP_APP_NAME_KEY)
 
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -159,8 +160,11 @@ def execute(configurations={}, parameters={}, host_name=None):
 
 
     start_time = time.time()
-    if STACK_ROOT in configurations:
-      llap_status_cmd = configurations[STACK_ROOT] + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
+    if STACK_NAME in configurations and STACK_ROOT in configurations:
+      stack_root = stack_tools.get_stack_root(configurations[STACK_NAME],
+        configurations[STACK_ROOT])
+
+      llap_status_cmd = stack_root + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
     else:
       llap_status_cmd = STACK_ROOT_DEFAULT + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
 


[04/10] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
index 4e7d857..bcadd03 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
@@ -1,873 +1,873 @@
 {
     "localComponents": [
-        "SECONDARY_NAMENODE", 
-        "HDFS_CLIENT", 
-        "DATANODE", 
-        "NAMENODE", 
-        "RANGER_ADMIN", 
-        "RANGER_TAGSYNC", 
-        "RANGER_USERSYNC", 
-        "ZOOKEEPER_SERVER", 
-        "ZOOKEEPER_CLIENT", 
-        "KERBEROS_CLIENT", 
+        "SECONDARY_NAMENODE",
+        "HDFS_CLIENT",
+        "DATANODE",
+        "NAMENODE",
+        "RANGER_ADMIN",
+        "RANGER_TAGSYNC",
+        "RANGER_USERSYNC",
+        "ZOOKEEPER_SERVER",
+        "ZOOKEEPER_CLIENT",
+        "KERBEROS_CLIENT",
         "RANGER_KMS_SERVER"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-kms-site": {}, 
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "kms-log4j": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "ranger-ugsync-site": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
-        "ranger-kms-security": {}, 
-        "kerberos-env": {}, 
-        "kms-properties": {}, 
-        "admin-properties": {}, 
-        "ranger-kms-policymgr-ssl": {}, 
+        "ranger-kms-site": {},
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "kms-log4j": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "ranger-ugsync-site": {},
+        "ranger-hdfs-plugin-properties": {},
+        "ranger-kms-security": {},
+        "kerberos-env": {},
+        "kms-properties": {},
+        "admin-properties": {},
+        "ranger-kms-policymgr-ssl": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
-        "ranger-tagsync-site": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-kms-audit": {}, 
-        "hadoop-policy": {}, 
-        "hdfs-log4j": {}, 
-        "usersync-log4j": {}, 
-        "krb5-conf": {}, 
-        "kms-site": {}, 
+        },
+        "ranger-tagsync-site": {},
+        "tagsync-log4j": {},
+        "ranger-kms-audit": {},
+        "hadoop-policy": {},
+        "hdfs-log4j": {},
+        "usersync-log4j": {},
+        "krb5-conf": {},
+        "kms-site": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "zookeeper-env": {}, 
-        "admin-log4j": {}, 
-        "zoo.cfg": {}, 
-        "ranger-hdfs-security": {}, 
-        "usersync-properties": {}, 
-        "kms-env": {}, 
-        "dbks-site": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "zookeeper-env": {},
+        "admin-log4j": {},
+        "zoo.cfg": {},
+        "ranger-hdfs-security": {},
+        "usersync-properties": {},
+        "kms-env": {},
+        "dbks-site": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "43-0", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER_KMS", 
-    "role": "RANGER_KMS_SERVER", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 43, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "43-0",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER_KMS",
+    "role": "RANGER_KMS_SERVER",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 43,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "c1", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 200, 
-    "roleParams": {}, 
+    },
+    "clusterName": "c1",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 200,
+    "roleParams": {},
     "configurationTags": {
         "ranger-kms-site": {
             "tag": "version1467026737262"
-        }, 
+        },
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1467016680635"
-        }, 
+        },
         "kms-log4j": {
             "tag": "version1467026737262"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1467016680511"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1467016680537"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ranger-kms-security": {
             "tag": "version1467026737262"
-        }, 
+        },
         "kerberos-env": {
             "tag": "version1467016537243"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-kms-policymgr-ssl": {
             "tag": "version1467026737262"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1467016680401"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1467016680586"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "ranger-kms-audit": {
             "tag": "version1467026737262"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "krb5-conf": {
             "tag": "version1467016537243"
-        }, 
+        },
         "kms-site": {
             "tag": "version1467026751210"
-        }, 
+        },
         "core-site": {
             "tag": "version1467026751256"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1467016680446"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1467016680492"
-        }, 
+        },
         "kms-properties": {
             "tag": "version1467026737262"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "kms-env": {
             "tag": "version1467026737262"
-        }, 
+        },
         "dbks-site": {
             "tag": "version1467026751234"
-        }, 
+        },
         "cluster-env": {
             "tag": "version1467016680567"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
-        "package_version": "2_5_0_0_*", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
+        "package_version": "2_5_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
-        "current_version": "2.5.0.0-801", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
-        "agent_stack_retry_count": "5", 
-        "stack_version": "2.5", 
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
-        "java_home": "/usr/jdk64/jdk1.7.0_45", 
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
-        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
-        "package_list": "[{\"name\":\"ranger_${stack_version}-kms\",\"condition\":\"\",\"skipUpgrade\":false}]", 
-        "db_name": "ambari", 
-        "group_list": "[\"kms\",\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"kms\",\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
+        "current_version": "2.5.0.0-801",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+        "agent_stack_retry_count": "5",
+        "stack_version": "2.5",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+        "package_list": "[{\"name\":\"ranger_${stack_version}-kms\",\"condition\":\"\",\"skipUpgrade\":false}]",
+        "db_name": "ambari",
+        "group_list": "[\"kms\",\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"kms\",\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER_KMS/0.5.0.2.3/package", 
-        "script": "scripts/kms_server.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "version": "2.5.0.0-801", 
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "service_package_folder": "common-services/RANGER_KMS/0.5.0.2.3/package",
+        "script": "scripts/kms_server.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "version": "2.5.0.0-801",
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 0, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 0,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_kms_server_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-kms-site": {
-            "ranger.service.https.port": "9393", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "xa.webapp.dir": "./webapp", 
-            "ranger.service.host": "{{kms_host}}", 
-            "ranger.service.shutdown.port": "7085", 
-            "ranger.contextName": "/kms", 
+            "ranger.service.https.port": "9393",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "xa.webapp.dir": "./webapp",
+            "ranger.service.host": "{{kms_host}}",
+            "ranger.service.shutdown.port": "7085",
+            "ranger.contextName": "/kms",
             "ranger.service.http.port": "{{kms_port}}"
-        }, 
+        },
         "ranger-hdfs-audit": {
             "xasecure.audit.destination.solr.zookeepers": "NONE",
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
             "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
-            "xasecure.audit.destination.hdfs": "true", 
-            "xasecure.audit.destination.solr": "false", 
-            "xasecure.audit.provider.summary.enabled": "false", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.destination.hdfs": "true",
+            "xasecure.audit.destination.solr": "false",
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
-            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}", 
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
+            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
             "ranger.plugins.hdfs.serviceuser": "hdfs",
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
             "ranger.service.https.attrib.clientAuth": "want",
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
             "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
-            "ranger.audit.solr.zookeepers": "NONE", 
-            "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM", 
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
+            "ranger.audit.solr.zookeepers": "NONE",
+            "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
             "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
-            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "kms-log4j": {
             "content": "\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License. See accompanying LICENSE file.\n#\n\n# If the Java System property 'kms.log.dir' is not defined at KMS start up time\n# Setup sets its value to '${kms.home}/logs'\n\nlog4j.appender.kms=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kms.DatePattern='.'yyyy-MM-dd\nlog4j.appender.kms.File=${kms.log.dir}/kms.log\nlog4j.appender.kms.Append=true\nlog4j.appender.kms.layout=org.apache.log4j.PatternLayout\
 nlog4j.appender.kms.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} - %m%n\n\nlog4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kms-audit.DatePattern='.'yyyy-MM-dd\nlog4j.appender.kms-audit.File=${kms.log.dir}/kms-audit.log\nlog4j.appender.kms-audit.Append=true\nlog4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout\nlog4j.appender.kms-audit.layout.ConversionPattern=%d{ISO8601} %m%n\n\nlog4j.logger.kms-audit=INFO, kms-audit\nlog4j.additivity.kms-audit=false\n\nlog4j.rootLogger=ALL, kms\nlog4j.logger.org.apache.hadoop.conf=ERROR\nlog4j.logger.org.apache.hadoop=INFO\nlog4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
-            "atlas.kafka.security.protocol": "SASL_PLAINTEXT", 
-            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}", 
-            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}", 
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule", 
-            "atlas.jaas.KafkaClient.option.serviceName": "kafka", 
-            "atlas.kafka.bootstrap.servers": "localhost:6667", 
-            "atlas.jaas.KafkaClient.option.useKeyTab": "true", 
-            "atlas.jaas.KafkaClient.option.storeKey": "true", 
-            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required", 
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+            "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
+            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
+            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "atlas.jaas.KafkaClient.option.serviceName": "kafka",
+            "atlas.kafka.bootstrap.servers": "localhost:6667",
+            "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+            "atlas.jaas.KafkaClient.option.storeKey": "true",
+            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
             "atlas.kafka.sasl.kerberos.service.name": "kafka"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "false", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "false", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "false",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "false",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "ranger-kms-security": {
-            "ranger.plugin.kms.policy.pollIntervalMs": "30000", 
-            "ranger.plugin.kms.service.name": "{{repo_name}}", 
-            "ranger.plugin.kms.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
-            "ranger.plugin.kms.policy.rest.ssl.config.file": "/etc/ranger/kms/conf/ranger-policymgr-ssl.xml", 
-            "ranger.plugin.kms.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
+            "ranger.plugin.kms.policy.pollIntervalMs": "30000",
+            "ranger.plugin.kms.service.name": "{{repo_name}}",
+            "ranger.plugin.kms.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+            "ranger.plugin.kms.policy.rest.ssl.config.file": "/etc/ranger/kms/conf/ranger-policymgr-ssl.xml",
+            "ranger.plugin.kms.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
             "ranger.plugin.kms.policy.rest.url": "{{policymgr_mgr_url}}"
-        }, 
+        },
         "kerberos-env": {
-            "kdc_hosts": "c6401.ambari.apache.org", 
-            "manage_auth_to_local": "true", 
-            "install_packages": "true", 
-            "realm": "EXAMPLE.COM", 
-            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
-            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}", 
-            "kdc_create_attributes": "", 
-            "admin_server_host": "c6401.ambari.apache.org", 
-            "group": "ambari-managed-principals", 
-            "password_length": "20", 
-            "ldap_url": "", 
-            "manage_identities": "true", 
-            "password_min_lowercase_letters": "1", 
-            "create_ambari_principal": "true", 
-            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}", 
-            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", 
-            "password_chat_timeout": "5", 
-            "kdc_type": "mit-kdc", 
-            "set_password_expiry": "false", 
-            "password_min_punctuation": "1", 
-            "container_dn": "", 
-            "case_insensitive_username_rules": "false", 
-            "password_min_whitespace": "0", 
-            "password_min_uppercase_letters": "1", 
+            "kdc_hosts": "c6401.ambari.apache.org",
+            "manage_auth_to_local": "true",
+            "install_packages": "true",
+            "realm": "EXAMPLE.COM",
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}",
+            "kdc_create_attributes": "",
+            "admin_server_host": "c6401.ambari.apache.org",
+            "group": "ambari-managed-principals",
+            "password_length": "20",
+            "ldap_url": "",
+            "manage_identities": "true",
+            "password_min_lowercase_letters": "1",
+            "create_ambari_principal": "true",
+            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+            "password_chat_timeout": "5",
+            "kdc_type": "mit-kdc",
+            "set_password_expiry": "false",
+            "password_min_punctuation": "1",
+            "container_dn": "",
+            "case_insensitive_username_rules": "false",
+            "password_min_whitespace": "0",
+            "password_min_uppercase_letters": "1",
             "password_min_digits": "1"
-        }, 
+        },
         "kms-properties": {
-            "REPOSITORY_CONFIG_USERNAME": "keyadmin", 
-            "db_user": "rangerkms01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangerkms01", 
-            "KMS_MASTER_KEY_PASSWD": "StrongPassword01", 
-            "db_root_user": "root", 
-            "db_name": "rangerkms01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
-            "SQL_CONNECTOR_JAR": "{{driver_curl_target}}", 
+            "REPOSITORY_CONFIG_USERNAME": "keyadmin",
+            "db_user": "rangerkms01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangerkms01",
+            "KMS_MASTER_KEY_PASSWD": "StrongPassword01",
+            "db_root_user": "root",
+            "db_name": "rangerkms01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
+            "SQL_CONNECTOR_JAR": "{{driver_curl_target}}",
             "REPOSITORY_CONFIG_PASSWORD": "keyadmin"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
-        }, 
+        },
         "ranger-kms-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:1019", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.datanode.http.address": "0.0.0.0:1022", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+            "dfs.heartbeat.interval": "3",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:1019",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.datanode.http.address": "0.0.0.0:1022",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
-            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
-            "ranger.tagsync.source.atlasrest.username": "", 
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM", 
-            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.source.atlasrest.username": "",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
+            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "tagsync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
-        }, 
+        },
         "ranger-kms-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/ranger/kms/audit/solr/spool", 
-            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/ranger/kms/audit/hdfs/spool", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "xasecure.audit.destination.solr": "true", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/ranger/kms/audit/solr/spool",
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/ranger/kms/audit/hdfs/spool",
+            "xasecure.audit.destination.hdfs": "true",
+            "xasecure.audit.destination.solr": "true",
             "xasecure.audit.provider.summary.enabled": "false",
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "krb5-conf": {
-            "domains": "", 
-            "manage_krb5_conf": "true", 
-            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}", 
+            "domains": "",
+            "manage_krb5_conf": "true",
+            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}",
             "conf_dir": "/etc"
-        }, 
+        },
         "kms-site": {
-            "hadoop.kms.proxyuser.ranger.hosts": "*", 
-            "hadoop.kms.authentication.type": "kerberos", 
-            "hadoop.kms.proxyuser.ranger.groups": "*", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.path": "/hadoop-kms/hadoop-auth-signature-secret", 
-            "hadoop.kms.security.authorization.manager": "org.apache.ranger.authorization.kms.authorizer.RangerKmsAuthorizer", 
-            "hadoop.kms.authentication.kerberos.name.rules": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "hadoop.kms.current.key.cache.timeout.ms": "30000", 
-            "hadoop.kms.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "hadoop.kms.audit.aggregation.window.ms": "10000", 
-            "hadoop.kms.proxyuser.ranger.users": "*", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type": "kerberos", 
-            "hadoop.kms.key.provider.uri": "dbks://http@localhost:9292/kms", 
-            "hadoop.security.keystore.JavaKeyStoreProvider.password": "none", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "hadoop.kms.authentication.signer.secret.provider": "random", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string": "#HOSTNAME#:#PORT#,...", 
-            "hadoop.kms.cache.enable": "true", 
-            "hadoop.kms.cache.timeout.ms": "600000", 
+            "hadoop.kms.proxyuser.ranger.hosts": "*",
+            "hadoop.kms.authentication.type": "kerberos",
+            "hadoop.kms.proxyuser.ranger.groups": "*",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.path": "/hadoop-kms/hadoop-auth-signature-secret",
+            "hadoop.kms.security.authorization.manager": "org.apache.ranger.authorization.kms.authorizer.RangerKmsAuthorizer",
+            "hadoop.kms.authentication.kerberos.name.rules": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "hadoop.kms.current.key.cache.timeout.ms": "30000",
+            "hadoop.kms.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "hadoop.kms.audit.aggregation.window.ms": "10000",
+            "hadoop.kms.proxyuser.ranger.users": "*",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type": "kerberos",
+            "hadoop.kms.key.provider.uri": "dbks://http@localhost:9292/kms",
+            "hadoop.security.keystore.JavaKeyStoreProvider.password": "none",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "hadoop.kms.authentication.signer.secret.provider": "random",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string": "#HOSTNAME#:#PORT#,...",
+            "hadoop.kms.cache.enable": "true",
+            "hadoop.kms.cache.timeout.ms": "600000",
             "hadoop.kms.authentication.kerberos.principal": "*"
-        }, 
+        },
         "core-site": {
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "fs.trash.interval": "360", 
-            "ipc.server.tcpnodelay": "true", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
-            "ipc.client.idlethreshold": "8000", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "hadoop.security.authentication": "kerberos", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.proxyuser.kms.groups": "*", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.HTTP.groups": "users", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "hadoop.security.authorization": "true", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org", 
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "fs.trash.interval": "360",
+            "ipc.server.tcpnodelay": "true",
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "ipc.client.idlethreshold": "8000",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "hadoop.security.authentication": "kerberos",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.proxyuser.kms.groups": "*",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.HTTP.groups": "users",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "hadoop.security.authorization": "true",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
             "ipc.client.connection.maxidletime": "30000"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
             "hdfs_principal_name": "hdfs-cl1@EXAMPLE.COM",
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/ha

<TRUNCATED>

[05/10] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by jo...@apache.org.
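
Nearly every hunk in the fixture diff below is a whitespace-only change: the regenerated ranger-kms-default.json drops the trailing space that followed each JSON comma. As a minimal sketch only -- the helper below is hypothetical and not part of this commit -- a cleanup of that kind could be produced with:

    #!/usr/bin/env python
    # Hypothetical helper, not part of AMBARI-21430: strip trailing
    # whitespace from every line of a test fixture, rewriting it in place.
    import sys

    def strip_trailing_whitespace(path):
        with open(path) as fixture:
            lines = fixture.readlines()
        with open(path, "w") as fixture:
            for line in lines:
                # rstrip() also removes the newline, so write one back
                fixture.write(line.rstrip() + "\n")

    if __name__ == "__main__":
        # example usage: strip_trailing_whitespace.py ranger-kms-default.json
        strip_trailing_whitespace(sys.argv[1])
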
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
index 05cb78a..cafbede 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
@@ -1,55 +1,55 @@
 {
     "localComponents": [
-        "SECONDARY_NAMENODE", 
-        "HDFS_CLIENT", 
-        "DATANODE", 
-        "NAMENODE", 
-        "ZOOKEEPER_SERVER", 
-        "ZOOKEEPER_CLIENT", 
-        "RANGER_USERSYNC", 
-        "RANGER_ADMIN", 
+        "SECONDARY_NAMENODE",
+        "HDFS_CLIENT",
+        "DATANODE",
+        "NAMENODE",
+        "ZOOKEEPER_SERVER",
+        "ZOOKEEPER_CLIENT",
+        "RANGER_USERSYNC",
+        "RANGER_ADMIN",
         "RANGER_TAGSYNC",
         "RANGER_KMS_SERVER"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
-        "ranger-tagsync-site": {}, 
-        "zoo.cfg": {}, 
-        "hadoop-policy": {}, 
-        "hdfs-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
+        },
+        "ranger-tagsync-site": {},
+        "zoo.cfg": {},
+        "hadoop-policy": {},
+        "hdfs-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
-        "usersync-properties": {}, 
-        "zookeeper-env": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
+        "usersync-properties": {},
+        "zookeeper-env": {},
         "cluster-env": {},
         "dbks-site": {},
         "kms-env": {},
@@ -60,744 +60,744 @@
         "ranger-kms-site": {},
         "ranger-kms-policymgr-ssl": {},
         "ranger-kms-audit": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "9-1", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER_KMS", 
-    "role": "RANGER_KMS_SERVER", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 9, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "9-1",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER_KMS",
+    "role": "RANGER_KMS_SERVER",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 9,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "c1", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 64, 
-    "roleParams": {}, 
+    },
+    "clusterName": "c1",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 64,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466427664617"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1466427664621"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466427664617"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1466427664621"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466427664621"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466427664621"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466427664621"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1466427664621"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1466427664621"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466427664617"
-        }, 
+        },
         "core-site": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466427664621"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466427664621"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466427664621"
-        }, 
+        },
         "ranger-hdfs-security": {
             "tag": "version1466427664617"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466427664621"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1"
-        }, 
+        },
         "cluster-env": {
             "tag": "version1"
         },
         "dbks-site": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "kms-env": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "kms-log4j": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "kms-properties": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "kms-site": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "ranger-kms-security": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "ranger-kms-site": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "ranger-kms-policymgr-ssl": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "ranger-kms-audit": {
-            "tag": "version1"            
+            "tag": "version1"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
-        "current_version": "2.5.0.0-777", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
-        "agent_stack_retry_count": "5", 
-        "stack_version": "2.5", 
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
+        "current_version": "2.5.0.0-777",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+        "agent_stack_retry_count": "5",
+        "stack_version": "2.5",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
         "java_home": "/usr/jdk64/jdk1.7.0_45",
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
-        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-777\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-776\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
-        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]", 
-        "db_name": "ambari", 
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-777\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-776\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
+        "db_name": "ambari",
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
         "jce_name": "UnlimitedJCEPolicyJDK7.zip",
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_usersync.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "version": "2.5.0.0-777", 
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_usersync.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "version": "2.5.0.0-777",
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 1, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 1,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.125.4"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "ranger_kms_server_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
             "xasecure.audit.destination.solr.zookeepers": "c6401.ambari.apache.org:2181/ranger_audits",
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
-            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", 
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
             "xasecure.audit.destination.hdfs": "true",
-            "xasecure.audit.destination.solr": "true", 
-            "xasecure.audit.provider.summary.enabled": "false", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.destination.solr": "true",
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
-            "ranger.admin.kerberos.cookie.domain": "", 
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.admin.kerberos.cookie.domain": "",
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
             "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
-            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ranger_audits", 
-            "ranger.lookup.kerberos.principal": "", 
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
-            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
+            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ranger_audits",
+            "ranger.lookup.kerberos.principal": "",
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
             "atlas.kafka.bootstrap.servers": "localhost:6667"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!qLEQwP24KVlWY", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!qLEQwP24KVlWY",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
-        }, 
+        },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:50010", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.datanode.http.address": "0.0.0.0:50075", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:50010",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.heartbeat.interval": "3",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.datanode.http.address": "0.0.0.0:50075",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
-            "ranger.tagsync.atlas.to.ranger.service.mapping": "", 
-            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.kerberos.principal": "", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.atlas.custom.resource.mappers": "", 
-            "ranger.tagsync.kerberos.keytab": "", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.atlas.to.ranger.service.mapping": "",
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.kerberos.principal": "",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.atlas.custom.resource.mappers": "",
+            "ranger.tagsync.kerberos.keytab": "",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "core-site": {
-            "hadoop.proxyuser.root.hosts": "*", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "fs.trash.interval": "360", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "hadoop.security.authentication": "simple", 
-            "hadoop.proxyuser.root.groups": "*", 
-            "ipc.client.connection.maxidletime": "30000", 
-            "hadoop.security.key.provider.path": "", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.security.authorization": "false", 
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "ipc.server.tcpnodelay": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "DEFAULT", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "ipc.client.idlethreshold": "8000", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "hadoop.proxyuser.root.hosts": "*",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "fs.trash.interval": "360",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "hadoop.security.authentication": "simple",
+            "hadoop.proxyuser.root.groups": "*",
+            "ipc.client.connection.maxidletime": "30000",
+            "hadoop.security.key.provider.path": "",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.security.authorization": "false",
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "ipc.server.tcpnodelay": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "DEFAULT",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "ipc.client.idlethreshold": "8000",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
             "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
-        }, 
+        },
         "ssl-server": {
-            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", 
-            "ssl.server.keystore.keypassword": "bigdata", 
-            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", 
-            "ssl.server.keystore.password": "bigdata", 
-            "ssl.server.truststore.password": "bigdata", 
-            "ssl.server.truststore.type": "jks", 
-            "ssl.server.keystore.type": "jks", 
+            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
+            "ssl.server.keystore.keypassword": "bigdata",
+            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
+            "ssl.server.keystore.password": "bigdata",
+            "ssl.server.truststore.password": "bigdata",
+            "ssl.server.truststore.type": "jks",
+            "ssl.server.keystore.type": "jks",
             "ssl.server.truststore.reload.interval": "10000"
-        }, 
-        "ranger-site": {}, 
+        },
+        "ranger-site": {},
         "admin-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_appender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
-        }, 
+        },
         "tagsync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync.log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
-        }, 
+        },
         "ranger-hdfs-security": {
-            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
-            "ranger.plugin.hdfs.service.name": "{{repo_name}}", 
-            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
-            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", 
-            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", 
-            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", 
+            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+            "ranger.plugin.hdfs.service.name": "{{repo_name}}",
+            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
+            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
+            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
+            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
             "xasecure.add-hadoop-authorization": "true"
-        }, 
-        "usersync-properties": {}, 
+        },
+        "usersync-properties": {},
         "zookeeper-env": {
-            "zk_log_dir": "/var/log/zookeeper", 
-            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
-            "zk_server_heapsize": "1024m", 
-            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper",
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk

<TRUNCATED>

[02/10] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
index fa791c1..64e7d52 100644
--- a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
+++ b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
@@ -1,101 +1,101 @@
 {
     "localComponents": [
-        "SECONDARY_NAMENODE", 
-        "HDFS_CLIENT", 
-        "DATANODE", 
-        "NAMENODE", 
-        "RANGER_ADMIN", 
-        "RANGER_TAGSYNC", 
-        "RANGER_USERSYNC", 
-        "ZOOKEEPER_SERVER", 
-        "ZOOKEEPER_CLIENT", 
+        "SECONDARY_NAMENODE",
+        "HDFS_CLIENT",
+        "DATANODE",
+        "NAMENODE",
+        "RANGER_ADMIN",
+        "RANGER_TAGSYNC",
+        "RANGER_USERSYNC",
+        "ZOOKEEPER_SERVER",
+        "ZOOKEEPER_CLIENT",
         "KERBEROS_CLIENT",
         "LOGSEARCH_SOLR",
         "LOGSEARCH_SOLR_CLIENT"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
-        "kerberos-env": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
+        "kerberos-env": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
+        },
         "ranger-tagsync-site": {},
-        "ranger-tagsync-policymgr-ssl": {}, 
+        "ranger-tagsync-policymgr-ssl": {},
         "zoo.cfg": {},
         "hadoop-policy": {},
-        "hdfs-log4j": {}, 
-        "krb5-conf": {}, 
+        "hdfs-log4j": {},
+        "krb5-conf": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
         "ranger-solr-configuration": {},
         "usersync-properties": {},
         "zookeeper-env": {},
         "infra-solr-env": {},
         "infra-solr-client-log4j": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "41-2", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER", 
-    "role": "RANGER_ADMIN", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 41, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "41-2",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER",
+    "role": "RANGER_ADMIN",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 41,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "test_Cluster01", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 186, 
-    "roleParams": {}, 
+    },
+    "clusterName": "test_Cluster01",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 186,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1467016680635"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1467016680511"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1467016680537"
         },
@@ -104,52 +104,52 @@
         },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "kerberos-env": {
             "tag": "version1467016537243"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1467016680401"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1467016680586"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "krb5-conf": {
             "tag": "version1467016537243"
-        }, 
+        },
         "core-site": {
             "tag": "version1467016680612"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1467016680446"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
         },
@@ -158,10 +158,10 @@
         },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1467016680492"
         },
@@ -174,116 +174,116 @@
         "cluster-env": {
             "tag": "version1467016680567"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
         "package_version": "2_6_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
         "current_version": "2.6.0.0-801",
         "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
         "agent_stack_retry_count": "5",
         "stack_version": "2.6",
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
         "java_home": "/usr/jdk64/jdk1.7.0_45",
         "repository_version_id": "1",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
         "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.6\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.6.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
         "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
         "db_name": "ambari",
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "jce_policy-8.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "jce_policy-8.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_admin.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_admin.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
         "version": "2.6.0.0-801",
         "max_duration_for_retries": "0",
         "command_retry_enabled": "false",
-        "command_timeout": "600", 
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 2, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 2,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "infra_solr_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
             "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
-            "xasecure.audit.destination.hdfs": "true", 
+            "xasecure.audit.destination.hdfs": "true",
             "xasecure.audit.destination.solr": "false",
-            "xasecure.audit.provider.summary.enabled": "false", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
         },
         "ranger-tagsync-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
+            "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
             "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
-            "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks", 
+            "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks",
             "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
         },
@@ -296,186 +296,186 @@
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
         },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
             "ranger.is.solr.kerberised": "true",
-            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}", 
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
             "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
             "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/infra-solr",
             "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
             "ranger.jpa.jdbc.credential.alias": "rangeradmin",
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
-            "atlas.kafka.security.protocol": "SASL_PLAINTEXT", 
-            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}", 
-            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}", 
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule", 
-            "atlas.jaas.KafkaClient.option.serviceName": "kafka", 
-            "atlas.kafka.bootstrap.servers": "localhost:6667", 
-            "atlas.jaas.KafkaClient.option.useKeyTab": "true", 
-            "atlas.jaas.KafkaClient.option.storeKey": "true", 
-            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required", 
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+            "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
+            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
+            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "atlas.jaas.KafkaClient.option.serviceName": "kafka",
+            "atlas.kafka.bootstrap.servers": "localhost:6667",
+            "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+            "atlas.jaas.KafkaClient.option.storeKey": "true",
+            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
             "atlas.kafka.sasl.kerberos.service.name": "kafka"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
             "ranger_solr_replication_factor": "1",
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "kerberos-env": {
-            "kdc_hosts": "c6401.ambari.apache.org", 
-            "manage_auth_to_local": "true", 
-            "install_packages": "true", 
-            "realm": "EXAMPLE.COM", 
-            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
-            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}", 
-            "kdc_create_attributes": "", 
-            "admin_server_host": "c6401.ambari.apache.org", 
-            "group": "ambari-managed-principals", 
-            "password_length": "20", 
-            "ldap_url": "", 
-            "manage_identities": "true", 
-            "password_min_lowercase_letters": "1", 
-            "create_ambari_principal": "true", 
-            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}", 
-            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", 
-            "password_chat_timeout": "5", 
-            "kdc_type": "mit-kdc", 
-            "set_password_expiry": "false", 
-            "password_min_punctuation": "1", 
-            "container_dn": "", 
-            "case_insensitive_username_rules": "false", 
-            "password_min_whitespace": "0", 
-            "password_min_uppercase_letters": "1", 
+            "kdc_hosts": "c6401.ambari.apache.org",
+            "manage_auth_to_local": "true",
+            "install_packages": "true",
+            "realm": "EXAMPLE.COM",
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}",
+            "kdc_create_attributes": "",
+            "admin_server_host": "c6401.ambari.apache.org",
+            "group": "ambari-managed-principals",
+            "password_length": "20",
+            "ldap_url": "",
+            "manage_identities": "true",
+            "password_min_lowercase_letters": "1",
+            "create_ambari_principal": "true",
+            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+            "password_chat_timeout": "5",
+            "kdc_type": "mit-kdc",
+            "set_password_expiry": "false",
+            "password_min_punctuation": "1",
+            "container_dn": "",
+            "case_insensitive_username_rules": "false",
+            "password_min_whitespace": "0",
+            "password_min_uppercase_letters": "1",
             "password_min_digits": "1"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
         },
         "ranger-solr-configuration": {
@@ -484,261 +484,261 @@
             "ranger_audit_logs_merge_factor": "5"
         },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:1019", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.datanode.http.address": "0.0.0.0:1022", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+            "dfs.heartbeat.interval": "3",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:1019",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.datanode.http.address": "0.0.0.0:1022",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
             "ranger.tagsync.dest.ranger.ssl.config.filename": "{{stack_root}}/current/ranger-tagsync/conf/ranger-policymgr-ssl.xml",
             "ranger.tagsync.source.atlasrest.username": "",
             "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM", 
-            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
+            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "krb5-conf": {
-            "domains": "", 
-            "manage_krb5_conf": "true", 
-            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}", 
+            "domains": "",
+            "manage_krb5_conf": "true",
+            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}",
             "conf_dir": "/etc"
-        }, 
+        },
         "core-site": {
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "fs.trash.interval": "360", 
-            "ipc.server.tcpnodelay": "true", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
-            "ipc.client.idlethreshold": "8000", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "hadoop.security.authentication": "kerberos", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.HTTP.groups": "users", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "hadoop.security.authorization": "true", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org", 
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "fs.trash.interval": "360",
+            "ipc.server.tcpnodelay": "true",
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "ipc.client.idlethreshold": "8000",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "hadoop.security.authentication": "kerberos",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.HTTP.groups": "users",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "hadoop.security.authorization": "true",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
             "ipc.client.connection.maxidletime": "30000"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: consol

<TRUNCATED>

[08/10] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
index 1c2f33e..7018245 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
@@ -19,111 +19,111 @@
 package org.apache.ambari.server.controller.internal;
 
  import static org.easymock.EasyMock.anyLong;
- import static org.easymock.EasyMock.anyObject;
- import static org.easymock.EasyMock.capture;
- import static org.easymock.EasyMock.createMock;
- import static org.easymock.EasyMock.createNiceMock;
- import static org.easymock.EasyMock.eq;
- import static org.easymock.EasyMock.expect;
- import static org.easymock.EasyMock.expectLastCall;
- import static org.easymock.EasyMock.replay;
- import static org.easymock.EasyMock.verify;
-
- import java.io.File;
- import java.io.FileInputStream;
- import java.lang.reflect.Field;
- import java.sql.SQLException;
- import java.util.ArrayList;
- import java.util.Arrays;
- import java.util.Collections;
- import java.util.HashMap;
- import java.util.LinkedHashMap;
- import java.util.LinkedHashSet;
- import java.util.List;
- import java.util.Map;
- import java.util.Properties;
- import java.util.Set;
-
- import org.apache.ambari.annotations.Experimental;
- import org.apache.ambari.annotations.ExperimentalFeature;
- import org.apache.ambari.server.AmbariException;
- import org.apache.ambari.server.H2DatabaseCleaner;
- import org.apache.ambari.server.Role;
- import org.apache.ambari.server.actionmanager.ActionManager;
- import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
- import org.apache.ambari.server.actionmanager.HostRoleCommand;
- import org.apache.ambari.server.actionmanager.Stage;
- import org.apache.ambari.server.actionmanager.StageFactory;
- import org.apache.ambari.server.agent.CommandReport;
- import org.apache.ambari.server.agent.ExecutionCommand;
- import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
- import org.apache.ambari.server.api.services.AmbariMetaInfo;
- import org.apache.ambari.server.configuration.Configuration;
- import org.apache.ambari.server.controller.AmbariManagementController;
- import org.apache.ambari.server.controller.ExecuteActionRequest;
- import org.apache.ambari.server.controller.RequestStatusResponse;
- import org.apache.ambari.server.controller.ResourceProviderFactory;
- import org.apache.ambari.server.controller.spi.Request;
- import org.apache.ambari.server.controller.spi.RequestStatus;
- import org.apache.ambari.server.controller.spi.Resource;
- import org.apache.ambari.server.controller.spi.ResourceProvider;
- import org.apache.ambari.server.controller.utilities.PropertyHelper;
- import org.apache.ambari.server.orm.GuiceJpaInitializer;
- import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
- import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
- import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
- import org.apache.ambari.server.orm.dao.HostVersionDAO;
- import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
- import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
- import org.apache.ambari.server.orm.dao.StackDAO;
- import org.apache.ambari.server.orm.entities.ClusterEntity;
- import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
- import org.apache.ambari.server.orm.entities.HostVersionEntity;
- import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
- import org.apache.ambari.server.orm.entities.ResourceEntity;
- import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
- import org.apache.ambari.server.orm.entities.StackEntity;
- import org.apache.ambari.server.orm.entities.UpgradeEntity;
- import org.apache.ambari.server.security.TestAuthenticationFactory;
- import org.apache.ambari.server.security.authorization.AuthorizationException;
- import org.apache.ambari.server.security.authorization.ResourceType;
- import org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction;
- import org.apache.ambari.server.state.Cluster;
- import org.apache.ambari.server.state.Clusters;
- import org.apache.ambari.server.state.ConfigHelper;
- import org.apache.ambari.server.state.Host;
- import org.apache.ambari.server.state.MaintenanceState;
- import org.apache.ambari.server.state.RepositoryType;
- import org.apache.ambari.server.state.RepositoryVersionState;
- import org.apache.ambari.server.state.Service;
- import org.apache.ambari.server.state.ServiceComponent;
- import org.apache.ambari.server.state.ServiceComponentHost;
- import org.apache.ambari.server.state.ServiceInfo;
- import org.apache.ambari.server.state.ServiceOsSpecific;
- import org.apache.ambari.server.state.StackId;
- import org.apache.ambari.server.state.cluster.ClusterImpl;
- import org.apache.ambari.server.state.stack.upgrade.Direction;
- import org.apache.ambari.server.topology.TopologyManager;
- import org.apache.ambari.server.utils.StageUtils;
- import org.apache.commons.io.IOUtils;
- import org.easymock.Capture;
- import org.easymock.EasyMock;
- import org.easymock.IAnswer;
- import org.junit.After;
- import org.junit.Assert;
- import org.junit.Before;
- import org.junit.Ignore;
- import org.junit.Test;
- import org.springframework.security.core.Authentication;
- import org.springframework.security.core.context.SecurityContextHolder;
-
- import com.google.gson.JsonArray;
- import com.google.gson.JsonObject;
- import com.google.gson.JsonParser;
- import com.google.inject.AbstractModule;
- import com.google.inject.Guice;
- import com.google.inject.Injector;
- import com.google.inject.util.Modules;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.lang.reflect.Field;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.H2DatabaseCleaner;
+import org.apache.ambari.server.Role;
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
+import org.apache.ambari.server.actionmanager.HostRoleCommand;
+import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.actionmanager.StageFactory;
+import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.ExecuteActionRequest;
+import org.apache.ambari.server.controller.RequestStatusResponse;
+import org.apache.ambari.server.controller.ResourceProviderFactory;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.RequestStatus;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.ResourceProvider;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
+import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
+import org.apache.ambari.server.orm.dao.HostVersionDAO;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.HostVersionEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.ResourceEntity;
+import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.security.TestAuthenticationFactory;
+import org.apache.ambari.server.security.authorization.AuthorizationException;
+import org.apache.ambari.server.security.authorization.ResourceType;
+import org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.MaintenanceState;
+import org.apache.ambari.server.state.RepositoryType;
+import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.ServiceOsSpecific;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.cluster.ClusterImpl;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.io.IOUtils;
+import org.easymock.Capture;
+import org.easymock.EasyMock;
+import org.easymock.IAnswer;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.springframework.security.core.Authentication;
+import org.springframework.security.core.context.SecurityContextHolder;
+
+import com.google.gson.JsonArray;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.util.Modules;
 
 
  /**
@@ -222,7 +222,7 @@ public class ClusterStackVersionResourceProviderTest {
     repoVersion.setId(1l);
     repoVersion.setOperatingSystems(OS_JSON);
 
-    Map<String, Host> hostsForCluster = new HashMap<String, Host>();
+    Map<String, Host> hostsForCluster = new HashMap<>();
     int hostCount = 10;
     for (int i = 0; i < hostCount; i++) {
       String hostname = "host" + i;
@@ -271,7 +271,7 @@ public class ClusterStackVersionResourceProviderTest {
 
     AbstractControllerResourceProvider.init(resourceProviderFactory);
 
-    Map<String, Map<String, String>> hostConfigTags = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
     expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
 
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
@@ -363,9 +363,9 @@ public class ClusterStackVersionResourceProviderTest {
     injector.injectMembers(provider);
 
     // add the property map to a set for the request.  add more maps for multiple creates
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+    Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
 
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+    Map<String, Object> properties = new LinkedHashMap<>();
 
     // add properties to the request map
     properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
@@ -413,7 +413,7 @@ public class ClusterStackVersionResourceProviderTest {
     ambariMetaInfo.getComponent("HDP", "2.1.1", "HBASE", "HBASE_MASTER").setVersionAdvertised(true);
 
 
-    Map<String, Host> hostsForCluster = new HashMap<String, Host>();
+    Map<String, Host> hostsForCluster = new HashMap<>();
     int hostCount = 10;
     for (int i = 0; i < hostCount; i++) {
       String hostname = "host" + i;
@@ -491,7 +491,7 @@ public class ClusterStackVersionResourceProviderTest {
 
     AbstractControllerResourceProvider.init(resourceProviderFactory);
 
-    Map<String, Map<String, String>> hostConfigTags = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
     expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
 
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
@@ -585,9 +585,9 @@ public class ClusterStackVersionResourceProviderTest {
     injector.injectMembers(provider);
 
     // add the property map to a set for the request.  add more maps for multiple creates
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+    Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
 
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+    Map<String, Object> properties = new LinkedHashMap<>();
 
     // add properties to the request map
     properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
@@ -646,7 +646,7 @@ public class ClusterStackVersionResourceProviderTest {
     ambariMetaInfo.getComponent("HDP", "2.1.1", "HBASE", "HBASE_MASTER").setVersionAdvertised(true);
 
 
-    Map<String, Host> hostsForCluster = new HashMap<String, Host>();
+    Map<String, Host> hostsForCluster = new HashMap<>();
     int hostCount = 10;
     for (int i = 0; i < hostCount; i++) {
       String hostname = "host" + i;
@@ -708,7 +708,7 @@ public class ClusterStackVersionResourceProviderTest {
 
     AbstractControllerResourceProvider.init(resourceProviderFactory);
 
-    Map<String, Map<String, String>> hostConfigTags = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
     expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
 
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
@@ -806,9 +806,9 @@ public class ClusterStackVersionResourceProviderTest {
     injector.injectMembers(provider);
 
     // add the property map to a set for the request.  add more maps for multiple creates
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+    Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
 
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+    Map<String, Object> properties = new LinkedHashMap<>();
 
     // add properties to the request map
     properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
@@ -877,7 +877,7 @@ public class ClusterStackVersionResourceProviderTest {
 
     ambariMetaInfo.getComponent("HDP", "2.1.1", "HBASE", "HBASE_MASTER").setVersionAdvertised(true);
 
-    Map<String, Host> hostsForCluster = new HashMap<String, Host>();
+    Map<String, Host> hostsForCluster = new HashMap<>();
     int hostCount = 10;
     for (int i = 0; i < hostCount; i++) {
       String hostname = "host" + i;
@@ -939,7 +939,7 @@ public class ClusterStackVersionResourceProviderTest {
 
     AbstractControllerResourceProvider.init(resourceProviderFactory);
 
-    Map<String, Map<String, String>> hostConfigTags = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
     expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
 
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
@@ -1038,9 +1038,9 @@ public class ClusterStackVersionResourceProviderTest {
     injector.injectMembers(provider);
 
     // add the property map to a set for the request.  add more maps for multiple creates
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+    Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
 
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+    Map<String, Object> properties = new LinkedHashMap<>();
 
     // add properties to the request map
     properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
@@ -1152,7 +1152,7 @@ public class ClusterStackVersionResourceProviderTest {
 
     AbstractControllerResourceProvider.init(resourceProviderFactory);
 
-    Map<String, Map<String, String>> hostConfigTags = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
     expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
 
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
@@ -1169,7 +1169,7 @@ public class ClusterStackVersionResourceProviderTest {
     expect(cluster.getCurrentStackVersion()).andReturn(stackId);
     expect(cluster.getServiceComponentHosts(anyObject(String.class))).andReturn(schs).anyTimes();
 
-    Capture<StackId> capturedStackId = new Capture<StackId>();
+    Capture<StackId> capturedStackId = new Capture<>();
     cluster.setDesiredStackVersion(capture(capturedStackId));
       expectLastCall().once();
     expect(cluster.getHosts()).andReturn(hosts).anyTimes();
@@ -1206,7 +1206,7 @@ public class ClusterStackVersionResourceProviderTest {
     field.set(provider, finalizeUpgradeAction);
 
     // add the property map to a set for the request.  add more maps for multiple creates
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+    Map<String, Object> properties = new LinkedHashMap<>();
 
     // add properties to the request map
     properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, clusterName);
@@ -1305,7 +1305,7 @@ public class ClusterStackVersionResourceProviderTest {
 
     AbstractControllerResourceProvider.init(resourceProviderFactory);
 
-    Map<String, Map<String, String>> hostConfigTags = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
     expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
 
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
@@ -1327,7 +1327,7 @@ public class ClusterStackVersionResourceProviderTest {
     ClusterVersionEntity current = new ClusterVersionEntity();
     current.setRepositoryVersion(currentRepo);
 
-    Capture<StackId> capturedStackId = new Capture<StackId>();
+    Capture<StackId> capturedStackId = new Capture<>();
     cluster.setDesiredStackVersion(capture(capturedStackId));
       expectLastCall().once();
     expect(cluster.getHosts()).andReturn(hosts).anyTimes();
@@ -1362,7 +1362,7 @@ public class ClusterStackVersionResourceProviderTest {
 
 
     // add the property map to a set for the request.  add more maps for multiple creates
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+    Map<String, Object> properties = new LinkedHashMap<>();
 
     // add properties to the request map
     properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, clusterName);
@@ -1424,7 +1424,7 @@ public class ClusterStackVersionResourceProviderTest {
     repoVersion.setType(RepositoryType.STANDARD);
 
 
-    Map<String, Host> hostsForCluster = new HashMap<String, Host>();
+    Map<String, Host> hostsForCluster = new HashMap<>();
     int hostCount = 10;
     for (int i = 0; i < hostCount; i++) {
       String hostname = "host" + i;
@@ -1473,7 +1473,7 @@ public class ClusterStackVersionResourceProviderTest {
 
     AbstractControllerResourceProvider.init(resourceProviderFactory);
 
-    Map<String, Map<String, String>> hostConfigTags = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
     expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
 
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
@@ -1570,9 +1570,9 @@ public class ClusterStackVersionResourceProviderTest {
     injector.injectMembers(provider);
 
     // add the property map to a set for the request.  add more maps for multiple creates
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+    Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
 
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+    Map<String, Object> properties = new LinkedHashMap<>();
 
     // add properties to the request map
     properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
@@ -1636,7 +1636,7 @@ public class ClusterStackVersionResourceProviderTest {
     repoVersionEntity.setVersionXsd("version_definition.xsd");
     repoVersionEntity.setType(RepositoryType.STANDARD);
 
-    Map<String, Host> hostsForCluster = new HashMap<String, Host>();
+    Map<String, Host> hostsForCluster = new HashMap<>();
     List<HostVersionEntity> hostVersionEntitiesMergedWithNotRequired = new ArrayList<>();
     int hostCount = 10;
 
@@ -1714,8 +1714,8 @@ public class ClusterStackVersionResourceProviderTest {
     expect(cluster.getClusterName()).andReturn(clusterName).atLeastOnce();
     expect(cluster.getHosts()).andReturn(hostsForCluster.values()).atLeastOnce();
     expect(cluster.getServices()).andReturn(serviceMap).anyTimes();
-    expect(cluster.getServiceComponentHosts(anyObject(String.class))).andReturn(
-        serviceComponentHosts).anyTimes();
+    expect(cluster.getServiceComponentHosts(anyObject(String.class))).andReturn(serviceComponentHosts).anyTimes();
+    expect(cluster.getCurrentStackVersion()).andReturn(stackId).atLeastOnce();
 
     expect(repositoryVersionDAOMock.findByStackAndVersion(anyObject(StackId.class),
         anyObject(String.class))).andReturn(repoVersionEntity);
@@ -1757,9 +1757,9 @@ public class ClusterStackVersionResourceProviderTest {
 
     // add the property map to a set for the request. add more maps for multiple
     // creates
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+    Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
 
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+    Map<String, Object> properties = new LinkedHashMap<>();
 
     // add properties to the request map
     properties.put(
@@ -1826,9 +1826,9 @@ public class ClusterStackVersionResourceProviderTest {
     injector.injectMembers(provider);
 
     // add the property map to a set for the request.  add more maps for multiple creates
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+    Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
 
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+    Map<String, Object> properties = new LinkedHashMap<>();
 
     // add properties to the request map
     properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
index 53bd405..6e47b02 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
@@ -18,6 +18,27 @@
 
 package org.apache.ambari.server.topology;
 
+import static org.easymock.EasyMock.anyBoolean;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.anyString;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.newCapture;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.stackadvisor.StackAdvisorBlueprintProcessor;
 import org.apache.ambari.server.controller.AmbariManagementController;
@@ -28,20 +49,11 @@ import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.serveraction.kerberos.KerberosInvalidConfigurationException;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import com.google.common.collect.Maps;
-import org.easymock.EasyMock;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.StackId;
 import org.easymock.Capture;
 import org.easymock.CaptureType;
+import org.easymock.EasyMock;
 import org.easymock.EasyMockRule;
 import org.easymock.Mock;
 import org.easymock.MockType;
@@ -52,18 +64,7 @@ import org.powermock.api.easymock.PowerMock;
 import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
 
-import static org.easymock.EasyMock.anyBoolean;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.easymock.EasyMock.capture;
-import static org.junit.Assert.assertEquals;
+import com.google.common.collect.Maps;
 
 /**
  * ClusterConfigurationRequest unit tests
@@ -103,6 +104,13 @@ public class ClusterConfigurationRequestTest {
   @Mock(type = MockType.NICE)
   private KerberosHelper kerberosHelper;
 
+  @Mock(type = MockType.NICE)
+  private ConfigHelper configHelper;
+
+  private final String STACK_NAME = "testStack";
+  private final String STACK_VERSION = "1";
+  private final Map<String, Map<String, String>> stackProperties = new HashMap<>();
+
   /**
    * testConfigType config type should be in updatedConfigTypes, as no custom property in Blueprint
    * ==> Kerberos config property should be updated
@@ -198,7 +206,7 @@ public class ClusterConfigurationRequestTest {
     ConfigurationTopologyException {
 
 
-    Map<String, Map<String, String>> existingConfig = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> existingConfig = new HashMap<>();
     Configuration stackDefaultConfig = new Configuration(existingConfig,
       new HashMap<String, Map<String, Map<String, String>>>());
     if (stackPropertyValue != null) {
@@ -221,6 +229,8 @@ public class ClusterConfigurationRequestTest {
     expect(clusters.getCluster("testCluster")).andReturn(cluster).anyTimes();
 
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
     expect(stack.getServiceForConfigType("testConfigType")).andReturn("KERBEROS").anyTimes();
     expect(stack.getAllConfigurationTypes(anyString())).andReturn(Collections.<String>singletonList("testConfigType")
     ).anyTimes();
@@ -246,6 +256,7 @@ public class ClusterConfigurationRequestTest {
     expect(blueprint.getComponents("KERBEROS")).andReturn(kerberosComponents).anyTimes();
     expect(blueprint.getComponents("ZOOKEPER")).andReturn(zookeeperComponents).anyTimes();
 
+    expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(blueprint.isValidConfigType("testConfigType")).andReturn(true).anyTimes();
@@ -256,10 +267,14 @@ public class ClusterConfigurationRequestTest {
     expect(topology.getHostGroupsForComponent(anyString())).andReturn(Collections.<String>emptyList())
       .anyTimes();
 
-      expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+    expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
     expect(ambariContext.createConfigurationRequests(anyObject(Map.class))).andReturn(Collections
       .<ConfigurationRequest>emptyList()).anyTimes();
 
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
     if (kerberosConfig == null) {
       kerberosConfig = new HashMap<>();
       Map<String, String> properties = new HashMap<>();
@@ -277,15 +292,14 @@ public class ClusterConfigurationRequestTest {
       (captureUpdatedConfigTypes));
     expectLastCall();
 
-    PowerMock.replay(stack, blueprint, topology, controller, clusters, kerberosHelper, ambariContext,
-      AmbariContext
-        .class);
+    PowerMock.replay(stack, blueprint, topology, controller, clusters, kerberosHelper,
+        ambariContext, AmbariContext.class, configHelper);
 
     ClusterConfigurationRequest clusterConfigurationRequest = new ClusterConfigurationRequest(
       ambariContext, topology, false, stackAdvisorBlueprintProcessor, true);
     clusterConfigurationRequest.process();
 
-    verify(blueprint, topology, ambariContext, controller, kerberosHelper);
+    verify(blueprint, topology, ambariContext, controller, kerberosHelper, configHelper);
 
 
     String clusterName = captureClusterName.getValue();
@@ -296,7 +310,7 @@ public class ClusterConfigurationRequestTest {
   @Test
   public void testProcessClusterConfigRequestDontIncludeKererosConfigs() throws Exception {
 
-    Map<String, Map<String, String>> existingConfig = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> existingConfig = new HashMap<>();
     Configuration stackConfig = new Configuration(existingConfig,
       new HashMap<String, Map<String, Map<String, String>>>());
 
@@ -308,8 +322,9 @@ public class ClusterConfigurationRequestTest {
     expect(clusters.getCluster("testCluster")).andReturn(cluster).anyTimes();
 
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
-    expect(stack.getAllConfigurationTypes(anyString())).andReturn(Collections.<String>singletonList("testConfigType")
-    ).anyTimes();
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
+    expect(stack.getAllConfigurationTypes(anyString())).andReturn(Collections.<String>singletonList("testConfigType")).anyTimes();
     expect(stack.getExcludedConfigurationTypes(anyString())).andReturn(Collections.<String>emptySet()).anyTimes();
     expect(stack.getConfigurationPropertiesWithMetadata(anyString(), anyString())).andReturn(Collections.<String,
       Stack.ConfigProperty>emptyMap()).anyTimes();
@@ -331,25 +346,29 @@ public class ClusterConfigurationRequestTest {
     expect(blueprint.getComponents("KERBEROS")).andReturn(kerberosComponents).anyTimes();
     expect(blueprint.getComponents("ZOOKEPER")).andReturn(zookeeperComponents).anyTimes();
 
+    expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getConfiguration()).andReturn(stackConfig).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(Collections.<String, HostGroupInfo>emptyMap()).anyTimes();
     expect(topology.getClusterId()).andReturn(Long.valueOf(1)).anyTimes();
+
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
     expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
     expect(ambariContext.createConfigurationRequests(anyObject(Map.class))).andReturn(Collections
       .<ConfigurationRequest>emptyList()).anyTimes();
 
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
 
     PowerMock.replay(stack, blueprint, topology, controller, clusters, ambariContext,
-      AmbariContext
-        .class);
+        AmbariContext.class, configHelper);
 
     ClusterConfigurationRequest clusterConfigurationRequest = new ClusterConfigurationRequest(
       ambariContext, topology, false, stackAdvisorBlueprintProcessor);
     clusterConfigurationRequest.process();
 
-    verify(blueprint, topology, ambariContext, controller);
+    verify(blueprint, topology, ambariContext, controller, configHelper);
 
   }
 
@@ -357,7 +376,7 @@ public class ClusterConfigurationRequestTest {
   public void testProcessClusterConfigRequestRemoveUnusedConfigTypes() throws Exception {
     // GIVEN
     Configuration configuration = createConfigurations();
-    Set<String> services = new HashSet<String>();
+    Set<String> services = new HashSet<>();
     services.add("HDFS");
     services.add("RANGER");
     Map<String, HostGroupInfo> hostGroupInfoMap = Maps.newHashMap();
@@ -365,6 +384,7 @@ public class ClusterConfigurationRequestTest {
     hg1.setConfiguration(createConfigurationsForHostGroup());
     hostGroupInfoMap.put("hg1", hg1);
 
+    expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfiguration()).andReturn(configuration).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap);
@@ -377,7 +397,12 @@ public class ClusterConfigurationRequestTest {
     expect(blueprint.isValidConfigType("cluster-env")).andReturn(true).anyTimes();
     expect(blueprint.isValidConfigType("global")).andReturn(true).anyTimes();
 
-    EasyMock.replay(stack, blueprint, topology);
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
+    EasyMock.replay(stack, blueprint, topology, ambariContext, configHelper);
     // WHEN
     new ClusterConfigurationRequest(ambariContext, topology, false, stackAdvisorBlueprintProcessor);
     // THEN
@@ -388,7 +413,7 @@ public class ClusterConfigurationRequestTest {
 
     assertFalse("SPARK service not present in topology host group config thus 'spark-env' config type should be removed from config.", hg1.getConfiguration().getFullAttributes().containsKey("spark-env"));
     assertTrue("HDFS service is present in topology host group config thus 'hdfs-site' config type should be left in the config.", hg1.getConfiguration().getFullAttributes().containsKey("hdfs-site"));
-    verify(stack, blueprint, topology);
+    verify(stack, blueprint, topology, ambariContext, configHelper);
   }
 
   @Test
@@ -409,6 +434,7 @@ public class ClusterConfigurationRequestTest {
     hg1.setConfiguration(createConfigurationsForHostGroup());
     hostGroupInfoMap.put("hg1", hg1);
 
+    expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfiguration()).andReturn(configuration).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap);
@@ -419,7 +445,12 @@ public class ClusterConfigurationRequestTest {
     expect(blueprint.isValidConfigType("cluster-env")).andReturn(true).anyTimes();
     expect(blueprint.isValidConfigType("global")).andReturn(true).anyTimes();
 
-    EasyMock.replay(stack, blueprint, topology);
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
+    EasyMock.replay(stack, blueprint, topology, ambariContext, configHelper);
 
     // When
 
@@ -431,7 +462,7 @@ public class ClusterConfigurationRequestTest {
 
     assertFalse("SPARK service not present in topology host group config thus 'spark-env' config type should be removed from config.", hg1.getConfiguration().getFullAttributes().containsKey("spark-env"));
     assertTrue("HDFS service is present in topology host group config thus 'hdfs-site' config type should be left in the config.", hg1.getConfiguration().getFullAttributes().containsKey("hdfs-site"));
-    verify(stack, blueprint, topology);
+    verify(stack, blueprint, topology, ambariContext, configHelper);
 
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/test/python/common-services/configs/hawq_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/common-services/configs/hawq_default.json b/ambari-server/src/test/python/common-services/configs/hawq_default.json
index 79864a9..1b6fafb 100644
--- a/ambari-server/src/test/python/common-services/configs/hawq_default.json
+++ b/ambari-server/src/test/python/common-services/configs/hawq_default.json
@@ -73,7 +73,11 @@
         "cluster-env": {
             "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
-            "user_group": "hadoop"
+            "user_group": "hadoop",
+            "stack_name": "PHD",
+            "stack_root": "{\"PHD\": \"/usr/phd\"}",
+            "stack_tools": "{\n \"PHD\": { \"stack_selector\": [\"phd-select\", \"/usr/bin/phd-select\", \"phd-select\"],\n  \"conf_selector\": [\"conf-select\", \"/usr/bin/conf-select\", \"conf-select\"]\n}\n}",
+            "stack_features": "{\"PHD\":{\"stack_features\":[{\"name\":\"express_upgrade\",\"description\":\"Express upgrade support\",\"min_version\":\"3.0.0.0\"},{\"name\":\"rolling_upgrade\",\"description\":\"Rolling upgrade support\",\"min_version\":\"3.0.0.0\"},{\"name\":\"config_versioning\",\"description\":\"Configurable versions support\",\"min_version\":\"3.0.0.0\"}]\n}\n}"
         }
     },
     "clusterHostInfo": {

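Note that the cluster-env values added above are JSON strings keyed by stack name rather than flat values, which is what lets several stacks' tool definitions co-exist. A short sketch of how a script could resolve the PHD entries (the helper below is illustrative; the real lookup lives in the stack_tools/stack_features library functions):

    import json

    def get_for_stack(cluster_env, prop_name, stack_name):
        # Each property value is itself a JSON document keyed by stack
        # name, e.g. stack_root = '{"PHD": "/usr/phd"}'.
        return json.loads(cluster_env[prop_name])[stack_name]

    cluster_env = {
        "stack_name": "PHD",
        "stack_root": '{"PHD": "/usr/phd"}',
    }

    print(get_for_stack(cluster_env, "stack_root", cluster_env["stack_name"]))
    # -> /usr/phd
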
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py b/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
index 0d47061..e6cce98 100644
--- a/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
+++ b/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
@@ -41,7 +41,11 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 21673930752L, used = 5695861760L,
       free = 15978068992L, path="/")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    configurations = {'{{cluster-env/stack_name}}': 'HDP',
+      '{{cluster-env/stack_root}}': '{"HDP":"/usr/hdp"}'}
+
+    res = alert_disk_space.execute(configurations=configurations)
+
     self.assertEqual(res,
       ('OK', ['Capacity Used: [26.28%, 5.7 GB], Capacity Total: [21.7 GB], path=/']))
 
@@ -50,7 +54,7 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 21673930752L, used = 14521533603L,
       free = 7152397149L, path="/")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations=configurations)
     self.assertEqual(res, (
       'WARNING',
       ['Capacity Used: [67.00%, 14.5 GB], Capacity Total: [21.7 GB], path=/']))
@@ -60,7 +64,7 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 21673930752L, used = 20590234214L,
       free = 1083696538, path="/")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations=configurations)
     self.assertEqual(res, ('CRITICAL',
     ['Capacity Used: [95.00%, 20.6 GB], Capacity Total: [21.7 GB], path=/']))
 
@@ -69,7 +73,7 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 5418482688L, used = 1625544806L,
       free = 3792937882L, path="/")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations=configurations)
     self.assertEqual(res, ('WARNING', [
       'Capacity Used: [30.00%, 1.6 GB], Capacity Total: [5.4 GB], path=/. Total free space is less than 5.0 GB']))
 
@@ -81,7 +85,7 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 21673930752L, used = 5695861760L,
       free = 15978068992L, path="/usr/hdp")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations=configurations)
     self.assertEqual(res,
       ('OK', ['Capacity Used: [26.28%, 5.7 GB], Capacity Total: [21.7 GB], path=/usr/hdp']))
 
@@ -90,6 +94,6 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 5418482688L, used = 1625544806L,
       free = 3792937882L, path="/usr/hdp")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations=configurations)
     self.assertEqual(res, (
       'WARNING', ["Capacity Used: [30.00%, 1.6 GB], Capacity Total: [5.4 GB], path=/usr/hdp. Total free space is less than 5.0 GB"]))


[03/10] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
index abe84ab..e5abe32 100644
--- a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
+++ b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
@@ -1,143 +1,143 @@
 {
     "localComponents": [
-        "NAMENODE", 
-        "SECONDARY_NAMENODE", 
-        "ZOOKEEPER_SERVER", 
-        "DATANODE", 
-        "HDFS_CLIENT", 
-        "ZOOKEEPER_CLIENT", 
-        "RANGER_USERSYNC", 
-        "RANGER_ADMIN", 
+        "NAMENODE",
+        "SECONDARY_NAMENODE",
+        "ZOOKEEPER_SERVER",
+        "DATANODE",
+        "HDFS_CLIENT",
+        "ZOOKEEPER_CLIENT",
+        "RANGER_USERSYNC",
+        "RANGER_ADMIN",
         "RANGER_TAGSYNC",
         "LOGSEARCH_SOLR",
         "LOGSEARCH_SOLR_CLIENT"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
+        },
         "ranger-tagsync-site": {},
         "ranger-tagsync-policymgr-ssl": {},
         "zoo.cfg": {},
         "hadoop-policy": {},
-        "hdfs-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
+        "hdfs-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
         "ranger-solr-configuration": {},
         "usersync-properties": {},
         "zookeeper-env": {},
         "infra-solr-env": {},
         "infra-solr-client-log4j": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "11-0", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER", 
-    "role": "RANGER_ADMIN", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 11, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "11-0",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER",
+    "role": "RANGER_ADMIN",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 11,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "c1", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 31, 
-    "roleParams": {}, 
+    },
+    "clusterName": "c1",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 31,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "core-site": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
         },
@@ -146,7 +146,7 @@
         },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
         },
@@ -165,116 +165,116 @@
         "cluster-env": {
             "tag": "version1"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
         "package_version": "2_6_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
         "current_version": "2.6.0.0-801",
         "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
         "agent_stack_retry_count": "5",
         "stack_version": "2.6",
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
-        "java_home": "/usr/jdk64/jdk1.7.0_45", 
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
         "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.6\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.6.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
         "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
-        "db_name": "ambari", 
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "jce_policy-8.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "db_name": "ambari",
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "jce_policy-8.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_admin.py", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_admin.py",
         "hooks_folder": "HDP/2.0.6/hooks",
         "version": "2.6.0.0-801",
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 0, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 0,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "infra_solr_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
-            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
             "xasecure.audit.destination.hdfs": "true",
-            "xasecure.audit.destination.solr": "false", 
+            "xasecure.audit.destination.solr": "false",
             "xasecure.audit.provider.summary.enabled": "false",
             "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
         },
         "ranger-tagsync-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
+            "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
             "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
-            "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks", 
+            "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks",
             "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
         },
@@ -287,143 +287,143 @@
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
         },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
             "ranger.admin.kerberos.cookie.domain": "",
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks", 
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
             "ranger.truststore.password": "changeit",
             "ranger.truststore.alias": "trustStoreAlias",
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
             "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/infra-solr",
-            "ranger.lookup.kerberos.principal": "", 
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
-            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.lookup.kerberos.principal": "",
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151",
             "ranger.service.https.attrib.keystore.credential.alias": "keyStoreCredentialAlias"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
             "atlas.kafka.bootstrap.servers": "localhost:6667"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
             "ranger_solr_replication_factor": "1",
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
         },
         "ranger-solr-configuration": {
@@ -432,248 +432,248 @@
             "ranger_audit_logs_merge_factor": "5"
         },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:50010", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.datanode.http.address": "0.0.0.0:50075", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:50010",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.heartbeat.interval": "3",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.datanode.http.address": "0.0.0.0:50075",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
             "ranger.tagsync.dest.ranger.ssl.config.filename": "{{stack_root}}/current/ranger-tagsync/conf/ranger-policymgr-ssl.xml",
             "ranger.tagsync.source.atlasrest.username": "",
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "", 
-            "ranger.tagsync.kerberos.keytab": "", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "",
+            "ranger.tagsync.kerberos.keytab": "",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "core-site": {
-            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "fs.trash.interval": "360", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "hadoop.security.authentication": "simple", 
-            "hadoop.proxyuser.root.groups": "*", 
-            "ipc.client.connection.maxidletime": "30000", 
-            "hadoop.security.key.provider.path": "", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.security.authorization": "false", 
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "ipc.server.tcpnodelay": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "DEFAULT", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "ipc.client.idlethreshold": "8000", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "fs.trash.interval": "360",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "hadoop.security.authentication": "simple",
+            "hadoop.proxyuser.root.groups": "*",
+            "ipc.client.connection.maxidletime": "30000",
+            "hadoop.security.key.provider.path": "",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.security.authorization": "false",
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "ipc.server.tcpnodelay": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "DEFAULT",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "ipc.client.idlethreshold": "8000",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
             "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
-        }, 
+        },
         "ssl-server": {
-            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", 
-            "ssl.server.keystore.keypassword": "bigdata", 
-            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", 
-            "ssl.server.keystore.password": "bigdata", 
-            "ssl.server.truststore.password": "bigdata", 
-            "ssl.server.truststore.type": "jks", 
-            "ssl.server.keystore.type": "jks", 
+            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
+            "ssl.server.keystore.keypassword": "bigdata",
+            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
+            "ssl.server.keystore.password": "bigdata",
+            "ssl.server.truststore.password": "bigdata",
+            "ssl.server.truststore.type": "jks",
+            "ssl.server.keystore.type": "jks",
             "ssl.server.truststore.reload.interval": "10000"
-        }, 
-        "ranger-site": {}, 
+        },
+        "ranger-site": {},
         "admin-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
 ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] 
 %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
-        }, 
+        },
         "tagsync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
-        }, 
+        },
         "ranger-hdfs-security": {
-            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
-            "ranger.plugin.hdfs.service.name": "{{repo_name}}", 
-            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
-            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", 
-            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", 
-            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", 
+            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+            "ranger.plugin.hdfs.service.name": "{{repo_name}}",
+            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
+            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
+            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
+            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
             "xasecure.add-hadoop-authorization": "true"
-        }, 
-        "usersync-properties": {}, 
+        },
+        "usersync-properties": {},
         "zookeeper-env": {
-            "zk_log_dir": "/var/log/zookeeper", 
-            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
-            "zk_server_heapsize": "1024m", 
-            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper",
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+            "zk_server_heapsize": "1024m",
+            "zk_pid_dir": "/var/run/zookeeper",
             "zk_user": "zookeeper"
         },
         "infra-solr-env": {
@@ -682,7 +682,7 @@
             "infra_solr_kerberos_name_rules": "DEFAULT",
             "infra_solr_user": "infra-solr",
             "infra_solr_maxmem": "1024",
-            "content": "#!/bin/bash\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# By default the script will use JAVA_HOME to determine which java\n# to use, but you can set a specific path for Solr to use without\n# affecting other Java applica
 tions on your server/workstation.\nSOLR_JAVA_HOME

<TRUNCATED>

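[Editor's note on the hunks in this and the following parts of the series: they are whitespace-only. Each removed/added pair differs only by a trailing space after the comma, which is the item separator that older Python json.dumps(..., indent=4) emitted by default. A minimal sketch of how such a normalization could be reproduced or checked follows; the file name and helper name are illustrative only and are not part of this commit:

    import json
    from collections import OrderedDict

    def strip_trailing_spaces(path):
        # preserve key order so the rewrite is a pure whitespace change
        with open(path) as fh:
            data = json.load(fh, object_pairs_hook=OrderedDict)
        # separators=(',', ': ') drops the space the json module would
        # otherwise print after each comma at the end of a line
        with open(path, 'w') as fh:
            json.dump(data, fh, indent=4, separators=(',', ': '))
            fh.write('\n')

    strip_trailing_spaces('ranger-admin-default.json')

Run against a test-config JSON file, this rewrites it in place with the same indentation but without the trailing spaces, matching the '+' side of the hunks below.]
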
[07/10] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
index a1d930c..fb77531 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
@@ -1,150 +1,150 @@
 {
     "localComponents": [
-        "NAMENODE", 
-        "SECONDARY_NAMENODE", 
-        "ZOOKEEPER_SERVER", 
-        "DATANODE", 
-        "HDFS_CLIENT", 
-        "ZOOKEEPER_CLIENT", 
-        "RANGER_USERSYNC", 
-        "RANGER_ADMIN", 
+        "NAMENODE",
+        "SECONDARY_NAMENODE",
+        "ZOOKEEPER_SERVER",
+        "DATANODE",
+        "HDFS_CLIENT",
+        "ZOOKEEPER_CLIENT",
+        "RANGER_USERSYNC",
+        "RANGER_ADMIN",
         "RANGER_TAGSYNC",
         "LOGSEARCH_SOLR",
         "LOGSEARCH_SOLR_CLIENT"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
-        "ranger-tagsync-site": {}, 
-        "zoo.cfg": {}, 
-        "hadoop-policy": {}, 
-        "hdfs-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
+        },
+        "ranger-tagsync-site": {},
+        "zoo.cfg": {},
+        "hadoop-policy": {},
+        "hdfs-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
-        "usersync-properties": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
+        "usersync-properties": {},
         "zookeeper-env": {},
         "infra-solr-env": {},
         "infra-solr-client-log4j": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "11-0", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER", 
-    "role": "RANGER_ADMIN", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 11, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "11-0",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER",
+    "role": "RANGER_ADMIN",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 11,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "c1", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 31, 
-    "roleParams": {}, 
+    },
+    "clusterName": "c1",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 31,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "core-site": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1"
         },
@@ -157,492 +157,492 @@
         "cluster-env": {
             "tag": "version1"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
-        "package_version": "2_5_0_0_*", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
+        "package_version": "2_5_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
-        "current_version": "2.5.0.0-801", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
-        "agent_stack_retry_count": "5", 
-        "stack_version": "2.5", 
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
-        "java_home": "/usr/jdk64/jdk1.7.0_45", 
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
-        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
-        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]", 
-        "db_name": "ambari", 
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "jce_policy-8.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
+        "current_version": "2.5.0.0-801",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+        "agent_stack_retry_count": "5",
+        "stack_version": "2.5",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
+        "db_name": "ambari",
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "jce_policy-8.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_admin.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "version": "2.5.0.0-801", 
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_admin.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "version": "2.5.0.0-801",
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 0, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 0,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "infra_solr_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
-            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
             "xasecure.audit.destination.hdfs": "true",
-            "xasecure.audit.destination.solr": "false", 
+            "xasecure.audit.destination.solr": "false",
             "xasecure.audit.provider.summary.enabled": "false",
             "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
             "ranger.admin.kerberos.cookie.domain": "",
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks", 
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
             "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr",
-            "ranger.lookup.kerberos.principal": "", 
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
-            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.lookup.kerberos.principal": "",
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
             "atlas.kafka.bootstrap.servers": "localhost:6667"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
             "ranger_solr_replication_factor": "1",
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
-        }, 
+        },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:50010", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.datanode.http.address": "0.0.0.0:50075", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:50010",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.heartbeat.interval": "3",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.datanode.http.address": "0.0.0.0:50075",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
-            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
-            "ranger.tagsync.source.atlasrest.username": "", 
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "", 
-            "ranger.tagsync.kerberos.keytab": "", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.source.atlasrest.username": "",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "",
+            "ranger.tagsync.kerberos.keytab": "",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "core-site": {
-            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "fs.trash.interval": "360", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "hadoop.security.authentication": "simple", 
-            "hadoop.proxyuser.root.groups": "*", 
-            "ipc.client.connection.maxidletime": "30000", 
-            "hadoop.security.key.provider.path": "", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.security.authorization": "false", 
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "ipc.server.tcpnodelay": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "DEFAULT", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "ipc.client.idlethreshold": "8000", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "fs.trash.interval": "360",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "hadoop.security.authentication": "simple",
+            "hadoop.proxyuser.root.groups": "*",
+            "ipc.client.connection.maxidletime": "30000",
+            "hadoop.security.key.provider.path": "",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.security.authorization": "false",
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "ipc.server.tcpnodelay": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "DEFAULT",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "ipc.client.idlethreshold": "8000",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
             "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
-        }, 
+        },
         "ssl-server": {
-            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", 
-            "ssl.server.keystore.keypassword": "bigdata", 
-            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", 
-            "ssl.server.keystore.password": "bigdata", 
-            "ssl.server.truststore.password": "bigdata", 
-            "ssl.server.truststore.type": "jks", 
-            "ssl.server.keystore.type": "jks", 
+            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
+            "ssl.server.keystore.keypassword": "bigdata",
+            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
+            "ssl.server.keystore.password": "bigdata",
+            "ssl.server.truststore.password": "bigdata",
+            "ssl.server.truststore.type": "jks",
+            "ssl.server.keystore.type": "jks",
             "ssl.server.truststore.reload.interval": "10000"
-        }, 
-        "ranger-site": {}, 
+        },
+        "ranger-site": {},
         "admin-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
 ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] 
 %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
-        }, 
+        },
         "tagsync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
-        }, 
+        },
         "ranger-hdfs-security": {
-            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
-            "ranger.plugin.hdfs.service.name": "{{repo_name}}", 
-            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
-            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", 
-            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", 
-            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", 
+            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+            "ranger.plugin.hdfs.service.name": "{{repo_name}}",
+            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
+            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
+            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
+            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
             "xasecure.add-hadoop-authorization": "true"
-        }, 
-        "usersync-properties": {}, 
+        },
+        "usersync-properties": {},
         "zookeeper-env": {
-            "zk_log_dir": "/var/log/zookeeper", 
-            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
-            "zk_server_heapsize": "1024m", 
-            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper",
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+            "zk_server_heapsize": "1024m",
+            "zk_pid_dir": "/var/run/zookeeper",
             "zk_user": "zookeeper"
         },
         "infra-solr-env": {
@@ -651,7 +651,7 @@
             "infra_solr_kerberos_name_rules": "DEFAULT",
             "infra_solr_user": "infra-solr",
             "infra_solr_maxmem": "1024",
-            "content": "#!/bin/bash\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LI

<TRUNCATED>

[09/10] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
index 0e9fe74..54eef18 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
@@ -26,6 +26,7 @@ from resource_management.core.resources import Execute
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions import get_klist_path
+from resource_management.libraries.functions import stack_tools
 from ambari_commons.os_check import OSConst, OSCheck
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from urlparse import urlparse
@@ -66,6 +67,7 @@ USER_PRINCIPAL_DEFAULT = 'oozie@EXAMPLE.COM'
 # default user
 USER_DEFAULT = 'oozie'
 
+STACK_NAME_KEY = '{{cluster-env/stack_name}}'
 STACK_ROOT_KEY = '{{cluster-env/stack_root}}'
 STACK_ROOT_DEFAULT = '/usr/hdp'
 
@@ -86,7 +88,7 @@ def get_tokens():
   to build the dictionary passed into execute
   """
   return (OOZIE_URL_KEY, USER_PRINCIPAL_KEY, SECURITY_ENABLED, USER_KEYTAB_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
-          USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_ROOT_KEY)
+          USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_NAME_KEY, STACK_ROOT_KEY)
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def get_check_command(oozie_url, host_name, configurations):
@@ -158,8 +160,8 @@ def get_check_command(oozie_url, host_name, configurations, parameters, only_kin
 
   # Configure stack root
   stack_root = STACK_ROOT_DEFAULT
-  if STACK_ROOT_KEY in configurations:
-    stack_root = configurations[STACK_ROOT_KEY].lower()
+  if STACK_NAME_KEY in configurations and STACK_ROOT_KEY in configurations:
+    stack_root = stack_tools.get_stack_root(configurations[STACK_NAME_KEY], configurations[STACK_ROOT_KEY]).lower()
 
   # oozie configuration directory using a symlink
   oozie_config_directory = OOZIE_CONF_DIR.replace(STACK_ROOT_PATTERN, stack_root)
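
For context on the hunk above: the two-argument call implies that cluster-env/stack_root is now a JSON mapping keyed by stack name rather than a bare path. A minimal sketch of what stack_tools.get_stack_root could plausibly do under that assumption (the real implementation lives in resource_management/libraries/functions/stack_tools.py and is not reproduced in this digest, so treat the fallback behavior here as hypothetical):

  import json

  def get_stack_root(stack_name, stack_root_json):
    """Hypothetical sketch: resolve the root directory for stack_name.

    stack_root_json is expected to be JSON such as '{"HDP": "/usr/hdp"}'.
    A non-JSON value is returned unchanged here as an assumed
    backwards-compatibility fallback for pre-AMBARI-21430 configs.
    """
    try:
      stack_roots = json.loads(stack_root_json)
    except ValueError:
      # assumed legacy value, e.g. "/usr/hdp"
      return stack_root_json

    if stack_name not in stack_roots:
      raise KeyError("Cannot determine the stack root for %s" % stack_name)

    return stack_roots[stack_name]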

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
index 4c5834f..f3c6406 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
@@ -23,6 +23,7 @@ import os
 import platform
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
+from resource_management.libraries.functions import stack_tools
 
 DiskInfo = collections.namedtuple('DiskInfo', 'total used free path')
 
@@ -36,6 +37,7 @@ MIN_FREE_SPACE_DEFAULT = 5000000000L
 PERCENT_USED_WARNING_DEFAULT = 50
 PERCENT_USED_CRITICAL_DEFAULT = 80
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 
 def get_tokens():
@@ -43,7 +45,7 @@ def get_tokens():
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
   """
-  return (STACK_ROOT, )
+  return (STACK_NAME, STACK_ROOT)
 
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -64,10 +66,10 @@ def execute(configurations={}, parameters={}, host_name=None):
   if configurations is None:
     return (('UNKNOWN', ['There were no configurations supplied to the script.']))
 
-  if not STACK_ROOT in configurations:
-    return (('STACK_ROOT', ['cluster-env/stack_root is not specified']))
+  if not STACK_NAME in configurations or not STACK_ROOT in configurations:
+    return (('STACK_ROOT', ['cluster-env/stack_name and cluster-env/stack_root are required']))
 
-  path = configurations[STACK_ROOT]
+  path = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
 
   try:
     disk_usage = _get_disk_usage(path)

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/host_scripts/alert_version_select.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_version_select.py b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
index f8755c9..a7b65af 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_version_select.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
@@ -31,6 +31,7 @@ RESULT_STATE_WARNING = 'WARNING'
 RESULT_STATE_CRITICAL = 'CRITICAL'
 RESULT_STATE_UNKNOWN = 'UNKNOWN'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_TOOLS = '{{cluster-env/stack_tools}}'
 
 
@@ -42,7 +43,7 @@ def get_tokens():
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
   """
-  return (STACK_TOOLS,)
+  return (STACK_NAME, STACK_TOOLS)
 
 
 def execute(configurations={}, parameters={}, host_name=None):
@@ -65,8 +66,10 @@ def execute(configurations={}, parameters={}, host_name=None):
     if STACK_TOOLS not in configurations:
       return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(STACK_TOOLS)])
 
+    stack_name = Script.get_stack_name()
+
     # Of the form,
-    # { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] }
+    # { "HDP" : { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] } }
     stack_tools_str = configurations[STACK_TOOLS]
 
     if stack_tools_str is None:
@@ -75,6 +78,7 @@ def execute(configurations={}, parameters={}, host_name=None):
     distro_select = "unknown-distro-select"
     try:
       stack_tools = json.loads(stack_tools_str)
+      stack_tools = stack_tools[stack_name]
       distro_select = stack_tools["stack_selector"][0]
     except:
       pass
@@ -87,18 +91,18 @@ def execute(configurations={}, parameters={}, host_name=None):
       (code, out, versions) = unsafe_get_stack_versions()
 
       if code == 0:
-        msg.append("Ok. {0}".format(distro_select))
+        msg.append("{0} ".format(distro_select))
         if versions is not None and type(versions) is list and len(versions) > 0:
-          msg.append("Versions: {0}".format(", ".join(versions)))
+          msg.append("reported the following versions: {0}".format(", ".join(versions)))
         return (RESULT_STATE_OK, ["\n".join(msg)])
       else:
-        msg.append("Failed, check dir {0} for unexpected contents.".format(stack_root_dir))
+        msg.append("{0} could not properly read {1}. Check this directory for unexpected contents.".format(distro_select, stack_root_dir))
         if out is not None:
           msg.append(out)
 
         return (RESULT_STATE_CRITICAL, ["\n".join(msg)])
     else:
-      msg.append("Ok. No stack root {0} to check.".format(stack_root_dir))
+      msg.append("No stack root {0} to check.".format(stack_root_dir))
       return (RESULT_STATE_OK, ["\n".join(msg)])
   except Exception, e:
     return (RESULT_STATE_CRITICAL, [e.message])
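
The hunk above captures the shape change: the stack_tools JSON is now keyed by stack name before the selector entries are reachable. A short standalone illustration of the same lookup the alert performs, using the example value from the updated comment in this hunk:

  import json

  # Example cluster-env/stack_tools value after this change (taken from
  # the comment in the hunk above); the outer key is the stack name.
  stack_tools_str = ('{"HDP": {"stack_selector": '
                     '["hdp-select", "/usr/bin/hdp-select", "hdp-select"], '
                     '"conf_selector": '
                     '["conf-select", "/usr/bin/conf-select", "conf-select"]}}')

  stack_tools = json.loads(stack_tools_str)
  stack_tools = stack_tools["HDP"]                  # index by stack name first
  distro_select = stack_tools["stack_selector"][0]  # -> "hdp-select"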

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
index 93c7948..2d11ef3 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
@@ -234,7 +234,20 @@ gpgcheck=0</value>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
-  
+
+  <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_name</name>
+    <value>BigInsights</value>
+    <description>The name of the stack.</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
   <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
   <property>
     <name>stack_tools</name>
@@ -267,8 +280,8 @@ gpgcheck=0</value>
   </property>
   <property>
     <name>stack_root</name>
-    <value>/usr/iop</value>
-    <description>Stack root folder</description>
+    <value>{"BigInsights":"/usr/iop"}</value>
+    <description>JSON which defines the stack root by stack name</description>  
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
index 4627e73..a6672e4 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
@@ -1,212 +1,214 @@
 {
-  "stack_features": [
-    {
-      "name": "snappy",
-      "description": "Snappy compressor/decompressor support",
-      "max_version": "4.0.0.0"
-    },
-    {
-      "name": "lzo",
-      "description": "LZO libraries support",
-      "min_version": "4.1.0.0"
-    },
-    {
-      "name": "express_upgrade",
-      "description": "Express upgrade support",
-      "min_version": "4.2.0.0"
-    },
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "4.1.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "4.1.0.0"
-    },
-    {
-      "name": "datanode_non_root",
-      "description": "DataNode running as non-root support (AMBARI-7615)",
-      "min_version": "4.1.0.0"
-    },
-    {
-      "name": "remove_ranger_hdfs_plugin_env",
-      "description": "HDFS removes Ranger env files (AMBARI-14299)",
-      "min_version": "4.2.0.0"
-    },
-    {
-      "name": "ranger",
-      "description": "Ranger Service support",
-      "min_version": "4.2.0.0"
-    },
-    {
-      "name": "ranger_tagsync_component",
-      "description": "Ranger Tagsync component support (AMBARI-14383)",
-      "min_version": "4.2.5.0"
-    },
-    {
-      "name": "phoenix",
-      "description": "Phoenix Service support",
-      "min_version": "4.2.0.0"
-    },
-    {
-      "name": "nfs",
-      "description": "NFS support",
-      "min_version": "4.1.0.0"
-    },
-    {
-      "name": "timeline_state_store",
-      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
-      "min_version": "4.0.0.0"
-    },
-    {
-      "name": "copy_tarball_to_hdfs",
-      "description": "Copy tarball to HDFS support (AMBARI-12113)",
-      "min_version": "4.1.0.0"
-    },
-    {
-      "name": "spark_16plus",
-      "description": "Spark 1.6+",
-      "min_version": "4.2.0.0"
-    },
-    {
-      "name": "spark_thriftserver",
-      "description": "Spark Thrift Server",
-      "min_version": "4.2.0.0"
-    },
-    {
-      "name": "create_kafka_broker_id",
-      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
-      "min_version": "4.0.0.0",
-      "max_version": "4.2.0.0"
-    },
-    {
-      "name": "kafka_listeners",
-      "description": "Kafka listeners (AMBARI-10984)",
-      "min_version": "4.2.0.0"
-    },
-    {
-      "name": "kafka_kerberos",
-      "description": "Kafka Kerberos support (AMBARI-10984)",
-      "min_version": "4.1.0.0"
-    },
-    {
-      "name": "ranger_usersync_non_root",
-      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
-      "min_version": "4.1.0.0"
-    },
-    {
-      "name": "ranger_audit_db_support",
-      "description": "Ranger Audit to DB support",
-      "min_version": "4.2.0.0",
-      "max_version": "4.2.0.0"
-    },
-    {
-      "name": "knox_versioned_data_dir",
-      "description": "Use versioned data dir for Knox (AMBARI-13164)",
-      "min_version": "4.2.0.0"
-    },
-    {
-      "name": "knox_sso_topology",
-      "description": "Knox SSO Topology support (AMBARI-13975)",
-      "min_version": "4.2.0.0"
-    },
-    {
-      "name": "oozie_admin_user",
-      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
-      "min_version": "4.0.0.0"
-    },
-    {
-      "name": "oozie_setup_shared_lib",
-      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
-      "min_version": "4.0.0.0"
-    },
-    {
-      "name": "oozie_host_kerberos",
-      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-      "min_version": "4.0.0.0",
-      "max_version": "4.1.0.0"
-    },
-    {
-      "name": "hive_metastore_upgrade_schema",
-      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
-      "min_version": "4.1.0.0"
-     },
-    {
-      "name": "hive_server_interactive",
-      "description": "Hive server interactive support (AMBARI-15573)",
-      "min_version": "4.4.0.0"
-     },
-    {
-      "name": "hive_webhcat_specific_configs",
-      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
-      "min_version": "4.1.0.0"
-     },
-    {
-      "name": "hive_purge_table",
-      "description": "Hive purge table support (AMBARI-12260)",
-      "min_version": "4.1.0.0"
-     },
-    {
-      "name": "hive_server2_kerberized_env",
-      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
-      "max_version": "4.1.0.0"
-     },
-    {
-      "name": "hive_env_heapsize",
-      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
-      "min_version": "4.0.0.0"
-    },
-    {
-      "name": "ranger_kms_hsm_support",
-      "description": "Ranger KMS HSM support (AMBARI-15752)",
-      "min_version": "4.2.5.0"
-    },
-    {
-      "name": "ranger_log4j_support",
-      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
-      "min_version": "4.2.5.0"
-    },
-    {
-      "name": "ranger_kerberos_support",
-      "description": "Ranger Kerberos support",
-      "min_version": "4.2.5.0"
-    },
-    {
-      "name": "hive_metastore_site_support",
-      "description": "Hive Metastore site support",
-      "min_version": "4.2.5.0"
-    },
-    {
-      "name": "ranger_usersync_password_jceks",
-      "description": "Saving Ranger Usersync credentials in jceks",
-      "min_version": "4.2.5.0"
-    },
-    {
-      "name": "ranger_install_logsearch_client",
-      "description": "LogSearch Service support",
-      "min_version": "4.2.5.0"
-    },
-    {
-      "name": "hbase_home_directory",
-      "description": "Hbase home directory in HDFS needed for HBASE backup",
-      "min_version": "4.2.5.0"
-    },
-    {
-      "name": "spark_livy",
-      "description": "Livy as slave component of spark",
-      "min_version": "4.4.0.0"
-    },
-    {
-      "name": "ranger_pid_support",
-      "description": "Ranger Service support pid generation AMBARI-16756",
-      "min_version": "4.2.5.0"
-    },
-    {
-      "name": "ranger_kms_pid_support",
-      "description": "Ranger KMS Service support pid generation",
-      "min_version": "4.2.5.0"
-    }
-  ]
+  "BigInsights": {
+    "stack_features": [
+      {
+        "name": "snappy",
+        "description": "Snappy compressor/decompressor support",
+        "max_version": "4.0.0.0"
+      },
+      {
+        "name": "lzo",
+        "description": "LZO libraries support",
+        "min_version": "4.1.0.0"
+      },
+      {
+        "name": "express_upgrade",
+        "description": "Express upgrade support",
+        "min_version": "4.2.0.0"
+      },
+      {
+        "name": "rolling_upgrade",
+        "description": "Rolling upgrade support",
+        "min_version": "4.1.0.0"
+      },
+      {
+        "name": "config_versioning",
+        "description": "Configurable versions support",
+        "min_version": "4.1.0.0"
+      },
+      {
+        "name": "datanode_non_root",
+        "description": "DataNode running as non-root support (AMBARI-7615)",
+        "min_version": "4.1.0.0"
+      },
+      {
+        "name": "remove_ranger_hdfs_plugin_env",
+        "description": "HDFS removes Ranger env files (AMBARI-14299)",
+        "min_version": "4.2.0.0"
+      },
+      {
+        "name": "ranger",
+        "description": "Ranger Service support",
+        "min_version": "4.2.0.0"
+      },
+      {
+        "name": "ranger_tagsync_component",
+        "description": "Ranger Tagsync component support (AMBARI-14383)",
+        "min_version": "4.2.5.0"
+      },
+      {
+        "name": "phoenix",
+        "description": "Phoenix Service support",
+        "min_version": "4.2.0.0"
+      },
+      {
+        "name": "nfs",
+        "description": "NFS support",
+        "min_version": "4.1.0.0"
+      },
+      {
+        "name": "timeline_state_store",
+        "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+        "min_version": "4.0.0.0"
+      },
+      {
+        "name": "copy_tarball_to_hdfs",
+        "description": "Copy tarball to HDFS support (AMBARI-12113)",
+        "min_version": "4.1.0.0"
+      },
+      {
+        "name": "spark_16plus",
+        "description": "Spark 1.6+",
+        "min_version": "4.2.0.0"
+      },
+      {
+        "name": "spark_thriftserver",
+        "description": "Spark Thrift Server",
+        "min_version": "4.2.0.0"
+      },
+      {
+        "name": "create_kafka_broker_id",
+        "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+        "min_version": "4.0.0.0",
+        "max_version": "4.2.0.0"
+      },
+      {
+        "name": "kafka_listeners",
+        "description": "Kafka listeners (AMBARI-10984)",
+        "min_version": "4.2.0.0"
+      },
+      {
+        "name": "kafka_kerberos",
+        "description": "Kafka Kerberos support (AMBARI-10984)",
+        "min_version": "4.1.0.0"
+      },
+      {
+        "name": "ranger_usersync_non_root",
+        "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+        "min_version": "4.1.0.0"
+      },
+      {
+        "name": "ranger_audit_db_support",
+        "description": "Ranger Audit to DB support",
+        "min_version": "4.2.0.0",
+        "max_version": "4.2.0.0"
+      },
+      {
+        "name": "knox_versioned_data_dir",
+        "description": "Use versioned data dir for Knox (AMBARI-13164)",
+        "min_version": "4.2.0.0"
+      },
+      {
+        "name": "knox_sso_topology",
+        "description": "Knox SSO Topology support (AMBARI-13975)",
+        "min_version": "4.2.0.0"
+      },
+      {
+        "name": "oozie_admin_user",
+        "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+        "min_version": "4.0.0.0"
+      },
+      {
+        "name": "oozie_setup_shared_lib",
+        "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+        "min_version": "4.0.0.0"
+      },
+      {
+        "name": "oozie_host_kerberos",
+        "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+        "min_version": "4.0.0.0",
+        "max_version": "4.1.0.0"
+      },
+      {
+        "name": "hive_metastore_upgrade_schema",
+        "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+        "min_version": "4.1.0.0"
+      },
+      {
+        "name": "hive_server_interactive",
+        "description": "Hive server interactive support (AMBARI-15573)",
+        "min_version": "4.4.0.0"
+      },
+      {
+        "name": "hive_webhcat_specific_configs",
+        "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+        "min_version": "4.1.0.0"
+      },
+      {
+        "name": "hive_purge_table",
+        "description": "Hive purge table support (AMBARI-12260)",
+        "min_version": "4.1.0.0"
+      },
+      {
+        "name": "hive_server2_kerberized_env",
+        "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+        "max_version": "4.1.0.0"
+      },
+      {
+        "name": "hive_env_heapsize",
+        "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+        "min_version": "4.0.0.0"
+      },
+      {
+        "name": "ranger_kms_hsm_support",
+        "description": "Ranger KMS HSM support (AMBARI-15752)",
+        "min_version": "4.2.5.0"
+      },
+      {
+        "name": "ranger_log4j_support",
+        "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+        "min_version": "4.2.5.0"
+      },
+      {
+        "name": "ranger_kerberos_support",
+        "description": "Ranger Kerberos support",
+        "min_version": "4.2.5.0"
+      },
+      {
+        "name": "hive_metastore_site_support",
+        "description": "Hive Metastore site support",
+        "min_version": "4.2.5.0"
+      },
+      {
+        "name": "ranger_usersync_password_jceks",
+        "description": "Saving Ranger Usersync credentials in jceks",
+        "min_version": "4.2.5.0"
+      },
+      {
+        "name": "ranger_install_logsearch_client",
+        "description": "LogSearch Service support",
+        "min_version": "4.2.5.0"
+      },
+      {
+        "name": "hbase_home_directory",
+        "description": "Hbase home directory in HDFS needed for HBASE backup",
+        "min_version": "4.2.5.0"
+      },
+      {
+        "name": "spark_livy",
+        "description": "Livy as slave component of spark",
+        "min_version": "4.4.0.0"
+      },
+      {
+        "name": "ranger_pid_support",
+        "description": "Ranger Service support pid generation AMBARI-16756",
+        "min_version": "4.2.5.0"
+      },
+      {
+        "name": "ranger_kms_pid_support",
+        "description": "Ranger KMS Service support pid generation",
+        "min_version": "4.2.5.0"
+      }
+    ]
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
index fdbbdf9..92c9349 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
@@ -1,4 +1,14 @@
 {
-  "stack_selector": ["iop-select", "/usr/bin/iop-select", "iop-select"],
-  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
+  "BigInsights": {
+    "stack_selector": [
+      "iop-select",
+      "/usr/bin/iop-select",
+      "iop-select"
+    ],
+    "conf_selector": [
+      "conf-select",
+      "/usr/bin/conf-select",
+      "conf-select"
+    ]
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index a79e904..c6b091d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -220,6 +220,18 @@ gpgcheck=0</value>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
+  <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_name</name>
+    <value>HDP</value>
+    <description>The name of the stack.</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
   <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
   <property>
     <name>stack_tools</name>
@@ -252,8 +264,8 @@ gpgcheck=0</value>
   </property>
   <property>
     <name>stack_root</name>
-    <value>/usr/hdp</value>
-    <description>Stack root folder</description>
+    <value>{"HDP":"/usr/hdp"}</value>
+    <description>JSON which defines the stack root by stack name</description>
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>
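
Taken together with the BigInsights change earlier in this series, the JSON-valued stack_root is what allows the roots of two stacks to co-exist in one property. A hypothetical merged value (the combined mapping below is purely illustrative; each cluster-env.xml in this commit declares only its own stack's entry):

  import json

  # Illustration only: a deployment aware of both stacks could carry a
  # mapping like this, with each stack resolving its own root.
  stack_root = json.loads('{"HDP": "/usr/hdp", "BigInsights": "/usr/iop"}')

  assert stack_root["HDP"] == "/usr/hdp"
  assert stack_root["BigInsights"] == "/usr/iop"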

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index 878645b..31cf0c8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -1,427 +1,429 @@
 {
-  "stack_features": [
-    {
-      "name": "snappy",
-      "description": "Snappy compressor/decompressor support",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
-    },
-    {
-      "name": "lzo",
-      "description": "LZO libraries support",
-      "min_version": "2.2.1.0"
-    },
-    {
-      "name": "express_upgrade",
-      "description": "Express upgrade support",
-      "min_version": "2.1.0.0"
-    },
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "kafka_acl_migration_support",
-      "description": "ACL migration support",
-      "min_version": "2.3.4.0"
-    },
-    {
-      "name": "secure_zookeeper",
-      "description": "Protect ZNodes with SASL acl in secure clusters",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "datanode_non_root",
-      "description": "DataNode running as non-root support (AMBARI-7615)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "remove_ranger_hdfs_plugin_env",
-      "description": "HDFS removes Ranger env files (AMBARI-14299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger",
-      "description": "Ranger Service support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_tagsync_component",
-      "description": "Ranger Tagsync component support (AMBARI-14383)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix",
-      "description": "Phoenix Service support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "nfs",
-      "description": "NFS support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "tez_for_spark",
-      "description": "Tez dependency for Spark",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "timeline_state_store",
-      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "copy_tarball_to_hdfs",
-      "description": "Copy tarball to HDFS support (AMBARI-12113)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "spark_16plus",
-      "description": "Spark 1.6+",
-      "min_version": "2.4.0.0"
-    },
-    {
-      "name": "spark_thriftserver",
-      "description": "Spark Thrift Server",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "storm_kerberos",
-      "description": "Storm Kerberos support (AMBARI-7570)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "storm_ams",
-      "description": "Storm AMS integration (AMBARI-10710)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "create_kafka_broker_id",
-      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_listeners",
-      "description": "Kafka listeners (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_kerberos",
-      "description": "Kafka Kerberos support (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "pig_on_tez",
-      "description": "Pig on Tez support (AMBARI-7863)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_usersync_non_root",
-      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_audit_db_support",
-      "description": "Ranger Audit to DB support",
-      "min_version": "2.2.0.0",
-      "max_version": "2.4.99.99"
-    },
-    {
-      "name": "accumulo_kerberos_user_auth",
-      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "knox_versioned_data_dir",
-      "description": "Use versioned data dir for Knox (AMBARI-13164)",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "knox_sso_topology",
-      "description": "Knox SSO Topology support (AMBARI-13975)",
-      "min_version": "2.3.8.0"
-    },
-    {
-      "name": "atlas_rolling_upgrade",
-      "description": "Rolling upgrade support for Atlas",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "oozie_admin_user",
-      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_create_hive_tez_configs",
-      "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_setup_shared_lib",
-      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_host_kerberos",
-      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-      "min_version": "2.0.0.0"
-    },
-    {
-      "name": "falcon_extensions",
-      "description": "Falcon Extension",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_upgrade_schema",
-      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server_interactive",
-      "description": "Hive server interactive support (AMBARI-15573)",
-      "min_version": "2.5.0.0"
-     },
-    {
-      "name": "hive_webhcat_specific_configs",
-      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_purge_table",
-      "description": "Hive purge table support (AMBARI-12260)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server2_kerberized_env",
-      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
-      "min_version": "2.2.3.0",
-      "max_version": "2.2.5.0"
-     },
-    {
-      "name": "hive_env_heapsize",
-      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_kms_hsm_support",
-      "description": "Ranger KMS HSM support (AMBARI-15752)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_log4j_support",
-      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kerberos_support",
-      "description": "Ranger Kerberos support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_site_support",
-      "description": "Hive Metastore site support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_usersync_password_jceks",
-      "description": "Saving Ranger Usersync credentials in jceks",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_install_infra_client",
-      "description": "Ambari Infra Service support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "falcon_atlas_support_2_3",
-      "description": "Falcon Atlas integration support for 2.3 stack",
-      "min_version": "2.3.99.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "falcon_atlas_support",
-      "description": "Falcon Atlas integration",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hbase_home_directory",
-      "description": "Hbase home directory in HDFS needed for HBASE backup",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_livy",
-      "description": "Livy as slave component of spark",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_livy2",
-      "description": "Livy as slave component of spark",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "atlas_ranger_plugin_support",
-      "description": "Atlas Ranger plugin support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_conf_dir_in_path",
-      "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
-      "min_version": "2.3.0.0",
-      "max_version": "2.4.99.99"
-    },
-    {
-      "name": "atlas_upgrade_support",
-      "description": "Atlas supports express and rolling upgrades",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_hook_support",
-      "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_pid_support",
-      "description": "Ranger Service support pid generation AMBARI-16756",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kms_pid_support",
-      "description": "Ranger KMS Service support pid generation",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_admin_password_change",
-      "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_setup_db_on_start",
-      "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "storm_metrics_apache_classes",
-      "description": "Metrics sink for Storm that uses Apache class names",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_java_opts_support",
-      "description": "Allow Spark to generate java-opts file",
-      "min_version": "2.2.0.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "atlas_hbase_setup",
-      "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_hive_plugin_jdbc_url",
-      "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "zkfc_version_advertised",
-      "description": "ZKFC advertise version",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix_core_hdfs_site_required",
-      "description": "HDFS and CORE site required for Phoenix",
-      "max_version": "2.5.9.9"
-    },
-    {
-      "name": "ranger_tagsync_ssl_xml_support",
-      "description": "Ranger Tagsync ssl xml support.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "ranger_xml_configuration",
-      "description": "Ranger code base support xml configurations",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_ranger_plugin_support",
-      "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "yarn_ranger_plugin_support",
-      "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_solr_config_support",
-      "description": "Showing Ranger solrconfig.xml on UI",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "hive_interactive_atlas_hook_required",
-      "description": "Registering Atlas Hook for Hive Interactive.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "core_site_for_ranger_plugins",
-      "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "atlas_install_hook_package_support",
-      "description": "Stop installing packages from 2.6",
-      "max_version": "2.5.9.9"
-    },
-    {
-      "name": "atlas_hdfs_site_on_namenode_ha",
-      "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "hive_interactive_ga",
-      "description": "Hive Interactive GA support",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "secure_ranger_ssl_password",
-      "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "ranger_kms_ssl",
-      "description": "Ranger KMS SSL properties in ambari stack",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "nifi_encrypt_config",
-      "description": "Encrypt sensitive properties written to nifi property file",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "toolkit_config_update",
-      "description": "Support separate input and output for toolkit configuration",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "admin_toolkit_support",
-      "description": "Supports the nifi admin toolkit",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "tls_toolkit_san",
-      "description": "Support subject alternative name flag",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "nifi_jaas_conf_create",
-      "description": "Create NIFI jaas configuration when kerberos is enabled",
-      "min_version": "2.6.0.0"
-    }
-  ]
+  "HDP": {
+    "stack_features": [
+      {
+        "name": "snappy",
+        "description": "Snappy compressor/decompressor support",
+        "min_version": "2.0.0.0",
+        "max_version": "2.2.0.0"
+      },
+      {
+        "name": "lzo",
+        "description": "LZO libraries support",
+        "min_version": "2.2.1.0"
+      },
+      {
+        "name": "express_upgrade",
+        "description": "Express upgrade support",
+        "min_version": "2.1.0.0"
+      },
+      {
+        "name": "rolling_upgrade",
+        "description": "Rolling upgrade support",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "kafka_acl_migration_support",
+        "description": "ACL migration support",
+        "min_version": "2.3.4.0"
+      },
+      {
+        "name": "secure_zookeeper",
+        "description": "Protect ZNodes with SASL acl in secure clusters",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "config_versioning",
+        "description": "Configurable versions support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "datanode_non_root",
+        "description": "DataNode running as non-root support (AMBARI-7615)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "remove_ranger_hdfs_plugin_env",
+        "description": "HDFS removes Ranger env files (AMBARI-14299)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger",
+        "description": "Ranger Service support",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_tagsync_component",
+        "description": "Ranger Tagsync component support (AMBARI-14383)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "phoenix",
+        "description": "Phoenix Service support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "nfs",
+        "description": "NFS support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "tez_for_spark",
+        "description": "Tez dependency for Spark",
+        "min_version": "2.2.0.0",
+        "max_version": "2.3.0.0"
+      },
+      {
+        "name": "timeline_state_store",
+        "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "copy_tarball_to_hdfs",
+        "description": "Copy tarball to HDFS support (AMBARI-12113)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "spark_16plus",
+        "description": "Spark 1.6+",
+        "min_version": "2.4.0.0"
+      },
+      {
+        "name": "spark_thriftserver",
+        "description": "Spark Thrift Server",
+        "min_version": "2.3.2.0"
+      },
+      {
+        "name": "storm_kerberos",
+        "description": "Storm Kerberos support (AMBARI-7570)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "storm_ams",
+        "description": "Storm AMS integration (AMBARI-10710)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "create_kafka_broker_id",
+        "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+        "min_version": "2.2.0.0",
+        "max_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_listeners",
+        "description": "Kafka listeners (AMBARI-10984)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_kerberos",
+        "description": "Kafka Kerberos support (AMBARI-10984)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "pig_on_tez",
+        "description": "Pig on Tez support (AMBARI-7863)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_usersync_non_root",
+        "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger_audit_db_support",
+        "description": "Ranger Audit to DB support",
+        "min_version": "2.2.0.0",
+        "max_version": "2.4.99.99"
+      },
+      {
+        "name": "accumulo_kerberos_user_auth",
+        "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "knox_versioned_data_dir",
+        "description": "Use versioned data dir for Knox (AMBARI-13164)",
+        "min_version": "2.3.2.0"
+      },
+      {
+        "name": "knox_sso_topology",
+        "description": "Knox SSO Topology support (AMBARI-13975)",
+        "min_version": "2.3.8.0"
+      },
+      {
+        "name": "atlas_rolling_upgrade",
+        "description": "Rolling upgrade support for Atlas",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "oozie_admin_user",
+        "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_create_hive_tez_configs",
+        "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_setup_shared_lib",
+        "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_host_kerberos",
+        "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+        "min_version": "2.0.0.0"
+      },
+      {
+        "name": "falcon_extensions",
+        "description": "Falcon Extension",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_metastore_upgrade_schema",
+        "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_server_interactive",
+        "description": "Hive server interactive support (AMBARI-15573)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_webhcat_specific_configs",
+        "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_purge_table",
+        "description": "Hive purge table support (AMBARI-12260)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_server2_kerberized_env",
+        "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+        "min_version": "2.2.3.0",
+        "max_version": "2.2.5.0"
+      },
+      {
+        "name": "hive_env_heapsize",
+        "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_kms_hsm_support",
+        "description": "Ranger KMS HSM support (AMBARI-15752)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_log4j_support",
+        "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_kerberos_support",
+        "description": "Ranger Kerberos support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_metastore_site_support",
+        "description": "Hive Metastore site support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_usersync_password_jceks",
+        "description": "Saving Ranger Usersync credentials in jceks",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_install_infra_client",
+        "description": "Ambari Infra Service support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "falcon_atlas_support_2_3",
+        "description": "Falcon Atlas integration support for 2.3 stack",
+        "min_version": "2.3.99.0",
+        "max_version": "2.4.0.0"
+      },
+      {
+        "name": "falcon_atlas_support",
+        "description": "Falcon Atlas integration",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hbase_home_directory",
+        "description": "Hbase home directory in HDFS needed for HBASE backup",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_livy",
+        "description": "Livy as slave component of spark",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_livy2",
+        "description": "Livy as slave component of spark",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "atlas_ranger_plugin_support",
+        "description": "Atlas Ranger plugin support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "atlas_conf_dir_in_path",
+        "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
+        "min_version": "2.3.0.0",
+        "max_version": "2.4.99.99"
+      },
+      {
+        "name": "atlas_upgrade_support",
+        "description": "Atlas supports express and rolling upgrades",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "atlas_hook_support",
+        "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_pid_support",
+        "description": "Ranger Service support pid generation AMBARI-16756",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_kms_pid_support",
+        "description": "Ranger KMS Service support pid generation",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_admin_password_change",
+        "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_setup_db_on_start",
+        "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "storm_metrics_apache_classes",
+        "description": "Metrics sink for Storm that uses Apache class names",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_java_opts_support",
+        "description": "Allow Spark to generate java-opts file",
+        "min_version": "2.2.0.0",
+        "max_version": "2.4.0.0"
+      },
+      {
+        "name": "atlas_hbase_setup",
+        "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_hive_plugin_jdbc_url",
+        "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "zkfc_version_advertised",
+        "description": "ZKFC advertise version",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "phoenix_core_hdfs_site_required",
+        "description": "HDFS and CORE site required for Phoenix",
+        "max_version": "2.5.9.9"
+      },
+      {
+        "name": "ranger_tagsync_ssl_xml_support",
+        "description": "Ranger Tagsync ssl xml support.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "ranger_xml_configuration",
+        "description": "Ranger code base support xml configurations",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_ranger_plugin_support",
+        "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "yarn_ranger_plugin_support",
+        "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger_solr_config_support",
+        "description": "Showing Ranger solrconfig.xml on UI",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "hive_interactive_atlas_hook_required",
+        "description": "Registering Atlas Hook for Hive Interactive.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "core_site_for_ranger_plugins",
+        "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "atlas_install_hook_package_support",
+        "description": "Stop installing packages from 2.6",
+        "max_version": "2.5.9.9"
+      },
+      {
+        "name": "atlas_hdfs_site_on_namenode_ha",
+        "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "hive_interactive_ga",
+        "description": "Hive Interactive GA support",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "secure_ranger_ssl_password",
+        "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "ranger_kms_ssl",
+        "description": "Ranger KMS SSL properties in ambari stack",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "nifi_encrypt_config",
+        "description": "Encrypt sensitive properties written to nifi property file",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "toolkit_config_update",
+        "description": "Support separate input and output for toolkit configuration",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "admin_toolkit_support",
+        "description": "Supports the nifi admin toolkit",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "tls_toolkit_san",
+        "description": "Support subject alternative name flag",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "nifi_jaas_conf_create",
+        "description": "Create NIFI jaas configuration when kerberos is enabled",
+        "min_version": "2.6.0.0"
+      }
+    ]
+  }
 }

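The feature entries above gate behavior by stack version. A minimal sketch of how a min_version/max_version pair could be evaluated (illustrative only, not the stack_features.py implementation; it assumes max_version is an exclusive upper bound, which the .99.99 guard values in the list suggest):

# Illustrative sketch; LooseVersion stands in for Ambari's own
# version-comparison helpers.
from distutils.version import LooseVersion

def is_feature_supported(feature, stack_version):
    # A feature applies when the stack version is at or above min_version
    # and (when present) strictly below max_version.
    v = LooseVersion(stack_version)
    if "min_version" in feature and v < LooseVersion(feature["min_version"]):
        return False
    if "max_version" in feature and v >= LooseVersion(feature["max_version"]):
        return False
    return True

feature = {"name": "ranger_audit_db_support",
           "min_version": "2.2.0.0", "max_version": "2.4.99.99"}
print(is_feature_supported(feature, "2.3.0.0"))  # True
print(is_feature_supported(feature, "2.5.0.0"))  # False
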
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
index d1aab4b..c515d57 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
@@ -1,4 +1,14 @@
 {
-  "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
-  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}
\ No newline at end of file
+  "HDP": {
+    "stack_selector": [
+      "hdp-select",
+      "/usr/bin/hdp-select",
+      "hdp-select"
+    ],
+    "conf_selector": [
+      "conf-select",
+      "/usr/bin/conf-select",
+      "conf-select"
+    ]
+  }
+}

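With the new layout, stack_tools.json is keyed by stack name, so selector tools for multiple stacks can coexist in a single value. A minimal sketch of a lookup against the structure above (illustrative; get_stack_tool here is a hypothetical helper, and the (name, path, package) reading of each triple is inferred from the values):

import json

STACK_TOOLS = '''
{
  "HDP": {
    "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
    "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
  }
}
'''

def get_stack_tool(stack_name, tool_name):
    # Select the stack's entry first, then the tool triple within it.
    tools = json.loads(STACK_TOOLS)
    tool = tools.get(stack_name, {}).get(tool_name)
    return tuple(tool) if tool else (None, None, None)

print(get_stack_tool("HDP", "stack_selector"))
# ('hdp-select', '/usr/bin/hdp-select', 'hdp-select')
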
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
index 7df00ee..f19ac52 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
@@ -20,6 +20,18 @@
  */
 -->
 <configuration>
+  <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_name</name>
+    <value>PERF</value>
+    <description>The name of the stack.</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 
   <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
   <property>
@@ -55,8 +67,8 @@
 
   <property>
     <name>stack_root</name>
-    <value>/usr/perf</value>
-    <description>Stack root folder</description>
+    <value>{"PERF":"/usr/perf"}</value>
+    <description>JSON which defines the stack root by stack name</description>
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>

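stack_root likewise changes from a bare path to a JSON map keyed by stack name. A minimal sketch of how a consumer might resolve the root directory for the current stack (illustrative only):

import json

# Value of cluster-env/stack_root after this change; a consumer would
# index it by the cluster's stack_name property.
stack_root_property = '{"PERF":"/usr/perf"}'

stack_root = json.loads(stack_root_property)["PERF"]
print(stack_root)  # /usr/perf
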
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
index e9e0ed2..839e8e6 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
@@ -1,19 +1,21 @@
 {
-  "stack_features": [
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "1.0.0.0"
-    },
-    {
-      "name": "secure_zookeeper",
-      "description": "Protect ZNodes with SASL acl in secure clusters",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "1.0.0.0"
-    }
-  ]
-}
+  "PERF": {
+    "stack_features": [
+      {
+        "name": "rolling_upgrade",
+        "description": "Rolling upgrade support",
+        "min_version": "1.0.0.0"
+      },
+      {
+        "name": "secure_zookeeper",
+        "description": "Protect ZNodes with SASL acl in secure clusters",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "config_versioning",
+        "description": "Configurable versions support",
+        "min_version": "1.0.0.0"
+      }
+    ]
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
index 535b9d9..62562f8 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
@@ -1,4 +1,14 @@
 {
-  "stack_selector": ["distro-select", "/usr/bin/distro-select", "distro-select"],
-  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}
\ No newline at end of file
+  "PERF": {
+    "stack_selector": [
+      "distro-select",
+      "/usr/bin/distro-select",
+      "distro-select"
+    ],
+    "conf_selector": [
+      "conf-select",
+      "/usr/bin/conf-select",
+      "conf-select"
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 793caf1..3562c96 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -41,9 +41,11 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.ambari.server.controller.StackConfigurationResponse;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.apache.ambari.server.topology.AdvisedConfiguration;
 import org.apache.ambari.server.topology.AmbariContext;
@@ -60,6 +62,7 @@ import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyException;
 import org.apache.ambari.server.topology.TopologyRequest;
 import org.apache.commons.lang.StringUtils;
+import org.easymock.EasyMock;
 import org.easymock.EasyMockRule;
 import org.easymock.EasyMockSupport;
 import org.easymock.Mock;
@@ -83,6 +86,10 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
   private static final Configuration EMPTY_CONFIG = new Configuration(Collections.<String, Map<String, String>>emptyMap(), Collections.<String, Map<String, Map<String, String>>>emptyMap());
   private final Map<String, Collection<String>> serviceComponents = new HashMap<>();
+  private final Map<String, Map<String, String>> stackProperties = new HashMap<>();
+
+  private final String STACK_NAME = "testStack";
+  private final String STACK_VERSION = "1";
 
   @Rule
   public EasyMockRule mocks = new EasyMockRule(this);
@@ -102,13 +109,16 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
   @Mock
   private TopologyRequest topologyRequestMock;
 
+  @Mock(type = MockType.NICE)
+  private ConfigHelper configHelper;
+
   @Before
   public void init() throws Exception {
     expect(bp.getStack()).andReturn(stack).anyTimes();
     expect(bp.getName()).andReturn("test-bp").anyTimes();
 
-    expect(stack.getName()).andReturn("testStack").anyTimes();
-    expect(stack.getVersion()).andReturn("1").anyTimes();
+    expect(stack.getName()).andReturn(STACK_NAME).atLeastOnce();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).atLeastOnce();
     // return false for all components since for this test we don't care about the value
     expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes();
     expect(stack.getConfigurationPropertiesWithMetadata(anyObject(String.class), anyObject(String.class))).andReturn(Collections.<String, Stack.ConfigProperty>emptyMap()).anyTimes();
@@ -198,11 +208,15 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
     Set<String> emptySet = Collections.emptySet();
     expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
+
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
   }
 
   @After
   public void tearDown() {
-    reset(bp, serviceInfo, stack, ambariContext);
+    reset(bp, serviceInfo, stack, ambariContext, configHelper);
   }
 
   @Test
@@ -6277,13 +6291,16 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
     topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
     BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
     reset(stack);
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
     expect(stack.getConfiguration(bp.getServices())).andReturn(createStackDefaults()).anyTimes();
 
     Set<String> emptySet = Collections.emptySet();
     expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
-
     replay(stack);
+
     // WHEN
     Set<String> configTypeUpdated = configProcessor.doUpdateForClusterCreate();
     // THEN
@@ -6334,13 +6351,17 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
     topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
     BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
     reset(stack);
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
     expect(stack.getConfiguration(bp.getServices())).andReturn(createStackDefaults()).anyTimes();
 
     Set<String> emptySet = Collections.emptySet();
     expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
 
     replay(stack);
+
     // WHEN
     configProcessor.doUpdateForClusterCreate();
     // THEN
@@ -7997,6 +8018,10 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
   @Test
   public void testValuesTrimming() throws Exception {
     reset(stack);
+
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
+
     Map<String, Map<String, String>> properties = new HashMap<>();
 
     Map<String, String> hdfsSite = new HashMap<>();
@@ -8020,6 +8045,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
       new StackConfigurationResponse(null, null, null, null, "hdfs-site", null, Collections.singleton(PropertyInfo.PropertyType.PASSWORD), null, null, null)));
     propertyConfigs.put("test.host", new Stack.ConfigProperty(
       new StackConfigurationResponse(null, null, null, null, "hdfs-site", null, null, null, valueAttributesInfoHost, null)));
+
     expect(stack.getServiceForConfigType("hdfs-site")).andReturn("HDFS").anyTimes();
     expect(stack.getConfigurationPropertiesWithMetadata("HDFS", "hdfs-site")).andReturn(propertyConfigs).anyTimes();
 
@@ -8091,7 +8117,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     throws InvalidTopologyException {
 
 
-    replay(stack, serviceInfo, ambariContext);
+    replay(stack, serviceInfo, ambariContext, configHelper);
 
     Map<String, HostGroupInfo> hostGroupInfo = new HashMap<>();
     Collection<String> allServices = new HashSet<>();
@@ -8154,7 +8180,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
       this.name = name;
       this.components = components;
       this.hosts = hosts;
-      this.configuration = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
+      configuration = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
         Collections.<String, Map<String, Map<String, String>>>emptyMap());
     }
 


[06/10] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
index f959b1f..7f1e549 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
@@ -1,159 +1,159 @@
 {
     "localComponents": [
-        "SECONDARY_NAMENODE", 
-        "HDFS_CLIENT", 
-        "DATANODE", 
-        "NAMENODE", 
-        "RANGER_ADMIN", 
-        "RANGER_TAGSYNC", 
-        "RANGER_USERSYNC", 
-        "ZOOKEEPER_SERVER", 
-        "ZOOKEEPER_CLIENT", 
+        "SECONDARY_NAMENODE",
+        "HDFS_CLIENT",
+        "DATANODE",
+        "NAMENODE",
+        "RANGER_ADMIN",
+        "RANGER_TAGSYNC",
+        "RANGER_USERSYNC",
+        "ZOOKEEPER_SERVER",
+        "ZOOKEEPER_CLIENT",
         "KERBEROS_CLIENT",
         "LOGSEARCH_SOLR",
         "LOGSEARCH_SOLR_CLIENT"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
-        "kerberos-env": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
+        "kerberos-env": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
-        "ranger-tagsync-site": {}, 
-        "zoo.cfg": {}, 
-        "hadoop-policy": {}, 
-        "hdfs-log4j": {}, 
-        "krb5-conf": {}, 
+        },
+        "ranger-tagsync-site": {},
+        "zoo.cfg": {},
+        "hadoop-policy": {},
+        "hdfs-log4j": {},
+        "krb5-conf": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
-        "usersync-properties": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
+        "usersync-properties": {},
         "zookeeper-env": {},
         "infra-solr-env": {},
         "infra-solr-client-log4j": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "41-2", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER", 
-    "role": "RANGER_ADMIN", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 41, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "41-2",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER",
+    "role": "RANGER_ADMIN",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 41,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "test_Cluster01", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 186, 
-    "roleParams": {}, 
+    },
+    "clusterName": "test_Cluster01",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 186,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1467016680635"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1467016680511"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1467016680537"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "kerberos-env": {
             "tag": "version1467016537243"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1467016680401"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1467016680586"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "krb5-conf": {
             "tag": "version1467016537243"
-        }, 
+        },
         "core-site": {
             "tag": "version1467016680612"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1467016680446"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1467016680492"
         },
@@ -166,550 +166,550 @@
         "cluster-env": {
             "tag": "version1467016680567"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
-        "package_version": "2_5_0_0_*", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
+        "package_version": "2_5_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
-        "current_version": "2.5.0.0-801", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
-        "agent_stack_retry_count": "5", 
-        "stack_version": "2.5", 
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
+        "current_version": "2.5.0.0-801",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+        "agent_stack_retry_count": "5",
+        "stack_version": "2.5",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
         "java_home": "/usr/jdk64/jdk1.7.0_45",
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
-        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
-        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]", 
-        "db_name": "ambari", 
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "jce_policy-8.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
+        "db_name": "ambari",
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "jce_policy-8.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_admin.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "version": "2.5.0.0-801", 
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_admin.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "version": "2.5.0.0-801",
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 2, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 2,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "infra_solr_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
             "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
-            "xasecure.audit.destination.hdfs": "true", 
+            "xasecure.audit.destination.hdfs": "true",
             "xasecure.audit.destination.solr": "false",
-            "xasecure.audit.provider.summary.enabled": "false", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
             "ranger.is.solr.kerberised": "true",
-            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}", 
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
             "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
-            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr", 
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
+            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr",
             "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
             "ranger.jpa.jdbc.credential.alias": "rangeradmin",
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
-            "atlas.kafka.security.protocol": "SASL_PLAINTEXT", 
-            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}", 
-            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}", 
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule", 
-            "atlas.jaas.KafkaClient.option.serviceName": "kafka", 
-            "atlas.kafka.bootstrap.servers": "localhost:6667", 
-            "atlas.jaas.KafkaClient.option.useKeyTab": "true", 
-            "atlas.jaas.KafkaClient.option.storeKey": "true", 
-            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required", 
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+            "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
+            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
+            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "atlas.jaas.KafkaClient.option.serviceName": "kafka",
+            "atlas.kafka.bootstrap.servers": "localhost:6667",
+            "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+            "atlas.jaas.KafkaClient.option.storeKey": "true",
+            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
             "atlas.kafka.sasl.kerberos.service.name": "kafka"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
             "ranger_solr_replication_factor": "1",
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "kerberos-env": {
-            "kdc_hosts": "c6401.ambari.apache.org", 
-            "manage_auth_to_local": "true", 
-            "install_packages": "true", 
-            "realm": "EXAMPLE.COM", 
-            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
-            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}", 
-            "kdc_create_attributes": "", 
-            "admin_server_host": "c6401.ambari.apache.org", 
-            "group": "ambari-managed-principals", 
-            "password_length": "20", 
-            "ldap_url": "", 
-            "manage_identities": "true", 
-            "password_min_lowercase_letters": "1", 
-            "create_ambari_principal": "true", 
-            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}", 
-            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", 
-            "password_chat_timeout": "5", 
-            "kdc_type": "mit-kdc", 
-            "set_password_expiry": "false", 
-            "password_min_punctuation": "1", 
-            "container_dn": "", 
-            "case_insensitive_username_rules": "false", 
-            "password_min_whitespace": "0", 
-            "password_min_uppercase_letters": "1", 
+            "kdc_hosts": "c6401.ambari.apache.org",
+            "manage_auth_to_local": "true",
+            "install_packages": "true",
+            "realm": "EXAMPLE.COM",
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}",
+            "kdc_create_attributes": "",
+            "admin_server_host": "c6401.ambari.apache.org",
+            "group": "ambari-managed-principals",
+            "password_length": "20",
+            "ldap_url": "",
+            "manage_identities": "true",
+            "password_min_lowercase_letters": "1",
+            "create_ambari_principal": "true",
+            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+            "password_chat_timeout": "5",
+            "kdc_type": "mit-kdc",
+            "set_password_expiry": "false",
+            "password_min_punctuation": "1",
+            "container_dn": "",
+            "case_insensitive_username_rules": "false",
+            "password_min_whitespace": "0",
+            "password_min_uppercase_letters": "1",
             "password_min_digits": "1"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
-        }, 
+        },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:1019", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.datanode.http.address": "0.0.0.0:1022", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+            "dfs.heartbeat.interval": "3",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:1019",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.datanode.http.address": "0.0.0.0:1022",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
-            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
-            "ranger.tagsync.source.atlasrest.username": "", 
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM", 
-            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.source.atlasrest.username": "",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
+            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "krb5-conf": {
-            "domains": "", 
-            "manage_krb5_conf": "true", 
-            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}", 
+            "domains": "",
+            "manage_krb5_conf": "true",
+            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}",
             "conf_dir": "/etc"
-        }, 
+        },
         "core-site": {
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "fs.trash.interval": "360", 
-            "ipc.server.tcpnodelay": "true", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
-            "ipc.client.idlethreshold": "8000", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "hadoop.security.authentication": "kerberos", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.HTTP.groups": "users", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "hadoop.security.authorization": "true", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org", 
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "fs.trash.interval": "360",
+            "ipc.server.tcpnodelay": "true",
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "ipc.client.idlethreshold": "8000",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "hadoop.security.authentication": "kerberos",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.HTTP.groups": "users",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "hadoop.security.authorization": "true",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
             "ipc.client.connection.maxidletime": "30000"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Softwa

<TRUNCATED>