Posted to commits@ambari.apache.org by ab...@apache.org on 2017/07/14 12:37:51 UTC

[01/31] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-logsearch-ui 2d5b75618 -> 0256fb7f7


http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index 282b542..2f3794d 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -102,8 +102,12 @@ class RMFTestCase(TestCase):
     else:
       raise RuntimeError("Please specify either config_file_path or config_dict parameter")
 
-    self.config_dict["configurations"]["cluster-env"]["stack_tools"] = RMFTestCase.get_stack_tools()
-    self.config_dict["configurations"]["cluster-env"]["stack_features"] = RMFTestCase.get_stack_features()
+    # add the stack tools & features from the stack if the test case's JSON file didn't have them
+    if "stack_tools" not in self.config_dict["configurations"]["cluster-env"]:
+      self.config_dict["configurations"]["cluster-env"]["stack_tools"] = RMFTestCase.get_stack_tools()
+
+    if "stack_features" not in self.config_dict["configurations"]["cluster-env"]:
+      self.config_dict["configurations"]["cluster-env"]["stack_features"] = RMFTestCase.get_stack_features()
 
     if config_overrides:
       for key, value in config_overrides.iteritems():
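
The hunk above changes the test harness so that stack_tools/stack_features values already supplied by a test case's own command JSON are no longer overwritten by the stack-level defaults. A minimal sketch of the same pattern, assuming only that config_dict mirrors the command-JSON shape used by these tests (the apply_stack_defaults helper and the placeholder values are illustrative, not part of the patch):

    # Inject stack-level defaults only when the test JSON omits them;
    # dict.setdefault is equivalent to the explicit "not in" checks above.
    def apply_stack_defaults(config_dict, stack_tools, stack_features):
        cluster_env = config_dict["configurations"]["cluster-env"]
        cluster_env.setdefault("stack_tools", stack_tools)
        cluster_env.setdefault("stack_features", stack_features)
        return config_dict

    # Usage: a test JSON that already defines stack_tools keeps its own value,
    # while stack_features (absent here) picks up the supplied default.
    config = {"configurations": {"cluster-env": {"stack_tools": "{}"}}}
    apply_stack_defaults(config, stack_tools="<stack default>", stack_features="<stack default>")
    assert config["configurations"]["cluster-env"]["stack_tools"] == "{}"
    assert config["configurations"]["cluster-env"]["stack_features"] == "<stack default>"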


[06/31] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
index f959b1f..7f1e549 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-secured.json
@@ -1,159 +1,159 @@
 {
     "localComponents": [
-        "SECONDARY_NAMENODE", 
-        "HDFS_CLIENT", 
-        "DATANODE", 
-        "NAMENODE", 
-        "RANGER_ADMIN", 
-        "RANGER_TAGSYNC", 
-        "RANGER_USERSYNC", 
-        "ZOOKEEPER_SERVER", 
-        "ZOOKEEPER_CLIENT", 
+        "SECONDARY_NAMENODE",
+        "HDFS_CLIENT",
+        "DATANODE",
+        "NAMENODE",
+        "RANGER_ADMIN",
+        "RANGER_TAGSYNC",
+        "RANGER_USERSYNC",
+        "ZOOKEEPER_SERVER",
+        "ZOOKEEPER_CLIENT",
         "KERBEROS_CLIENT",
         "LOGSEARCH_SOLR",
         "LOGSEARCH_SOLR_CLIENT"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
-        "kerberos-env": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
+        "kerberos-env": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
-        "ranger-tagsync-site": {}, 
-        "zoo.cfg": {}, 
-        "hadoop-policy": {}, 
-        "hdfs-log4j": {}, 
-        "krb5-conf": {}, 
+        },
+        "ranger-tagsync-site": {},
+        "zoo.cfg": {},
+        "hadoop-policy": {},
+        "hdfs-log4j": {},
+        "krb5-conf": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
-        "usersync-properties": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
+        "usersync-properties": {},
         "zookeeper-env": {},
         "infra-solr-env": {},
         "infra-solr-client-log4j": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "41-2", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER", 
-    "role": "RANGER_ADMIN", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 41, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "41-2",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER",
+    "role": "RANGER_ADMIN",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 41,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "test_Cluster01", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 186, 
-    "roleParams": {}, 
+    },
+    "clusterName": "test_Cluster01",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 186,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1467016680635"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1467016680511"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1467016680537"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "kerberos-env": {
             "tag": "version1467016537243"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1467016680401"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1467016680586"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "krb5-conf": {
             "tag": "version1467016537243"
-        }, 
+        },
         "core-site": {
             "tag": "version1467016680612"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1467016680446"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1467016680492"
         },
@@ -166,550 +166,550 @@
         "cluster-env": {
             "tag": "version1467016680567"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
-        "package_version": "2_5_0_0_*", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
+        "package_version": "2_5_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
-        "current_version": "2.5.0.0-801", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
-        "agent_stack_retry_count": "5", 
-        "stack_version": "2.5", 
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
+        "current_version": "2.5.0.0-801",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+        "agent_stack_retry_count": "5",
+        "stack_version": "2.5",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
         "java_home": "/usr/jdk64/jdk1.7.0_45",
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
-        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
-        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]", 
-        "db_name": "ambari", 
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "jce_policy-8.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
+        "db_name": "ambari",
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "jce_policy-8.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_admin.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "version": "2.5.0.0-801", 
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_admin.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "version": "2.5.0.0-801",
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 2, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 2,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "infra_solr_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
             "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
-            "xasecure.audit.destination.hdfs": "true", 
+            "xasecure.audit.destination.hdfs": "true",
             "xasecure.audit.destination.solr": "false",
-            "xasecure.audit.provider.summary.enabled": "false", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
             "ranger.is.solr.kerberised": "true",
-            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}", 
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
             "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
-            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr", 
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
+            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr",
             "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
             "ranger.jpa.jdbc.credential.alias": "rangeradmin",
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
-            "atlas.kafka.security.protocol": "SASL_PLAINTEXT", 
-            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}", 
-            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}", 
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule", 
-            "atlas.jaas.KafkaClient.option.serviceName": "kafka", 
-            "atlas.kafka.bootstrap.servers": "localhost:6667", 
-            "atlas.jaas.KafkaClient.option.useKeyTab": "true", 
-            "atlas.jaas.KafkaClient.option.storeKey": "true", 
-            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required", 
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+            "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
+            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
+            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "atlas.jaas.KafkaClient.option.serviceName": "kafka",
+            "atlas.kafka.bootstrap.servers": "localhost:6667",
+            "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+            "atlas.jaas.KafkaClient.option.storeKey": "true",
+            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
             "atlas.kafka.sasl.kerberos.service.name": "kafka"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
             "ranger_solr_replication_factor": "1",
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "kerberos-env": {
-            "kdc_hosts": "c6401.ambari.apache.org", 
-            "manage_auth_to_local": "true", 
-            "install_packages": "true", 
-            "realm": "EXAMPLE.COM", 
-            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
-            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}", 
-            "kdc_create_attributes": "", 
-            "admin_server_host": "c6401.ambari.apache.org", 
-            "group": "ambari-managed-principals", 
-            "password_length": "20", 
-            "ldap_url": "", 
-            "manage_identities": "true", 
-            "password_min_lowercase_letters": "1", 
-            "create_ambari_principal": "true", 
-            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}", 
-            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", 
-            "password_chat_timeout": "5", 
-            "kdc_type": "mit-kdc", 
-            "set_password_expiry": "false", 
-            "password_min_punctuation": "1", 
-            "container_dn": "", 
-            "case_insensitive_username_rules": "false", 
-            "password_min_whitespace": "0", 
-            "password_min_uppercase_letters": "1", 
+            "kdc_hosts": "c6401.ambari.apache.org",
+            "manage_auth_to_local": "true",
+            "install_packages": "true",
+            "realm": "EXAMPLE.COM",
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}",
+            "kdc_create_attributes": "",
+            "admin_server_host": "c6401.ambari.apache.org",
+            "group": "ambari-managed-principals",
+            "password_length": "20",
+            "ldap_url": "",
+            "manage_identities": "true",
+            "password_min_lowercase_letters": "1",
+            "create_ambari_principal": "true",
+            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+            "password_chat_timeout": "5",
+            "kdc_type": "mit-kdc",
+            "set_password_expiry": "false",
+            "password_min_punctuation": "1",
+            "container_dn": "",
+            "case_insensitive_username_rules": "false",
+            "password_min_whitespace": "0",
+            "password_min_uppercase_letters": "1",
             "password_min_digits": "1"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
-        }, 
+        },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:1019", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.datanode.http.address": "0.0.0.0:1022", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+            "dfs.heartbeat.interval": "3",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:1019",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.datanode.http.address": "0.0.0.0:1022",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
-            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
-            "ranger.tagsync.source.atlasrest.username": "", 
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM", 
-            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.source.atlasrest.username": "",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
+            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "krb5-conf": {
-            "domains": "", 
-            "manage_krb5_conf": "true", 
-            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}", 
+            "domains": "",
+            "manage_krb5_conf": "true",
+            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}",
             "conf_dir": "/etc"
-        }, 
+        },
         "core-site": {
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "fs.trash.interval": "360", 
-            "ipc.server.tcpnodelay": "true", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
-            "ipc.client.idlethreshold": "8000", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "hadoop.security.authentication": "kerberos", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.HTTP.groups": "users", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "hadoop.security.authorization": "true", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org", 
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "fs.trash.interval": "360",
+            "ipc.server.tcpnodelay": "true",
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "ipc.client.idlethreshold": "8000",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "hadoop.security.authentication": "kerberos",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.HTTP.groups": "users",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "hadoop.security.authorization": "true",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
             "ipc.client.connection.maxidletime": "30000"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Softwa

<TRUNCATED>

[10/31] ambari git commit: AMBARI-21430. Allow Multiple Versions of Stack Tools to Co-Exist - fix illegal import

Posted by ab...@apache.org.
AMBARI-21430. Allow Multiple Versions of Stack Tools to Co-Exist - fix illegal import


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d0f7a515
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d0f7a515
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d0f7a515

Branch: refs/heads/branch-feature-logsearch-ui
Commit: d0f7a51537469740e5397486b1e2c19862c26c01
Parents: f33a250
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Sun Jul 9 12:15:28 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Sun Jul 9 12:16:54 2017 +0200

----------------------------------------------------------------------
 .../java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d0f7a515/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index fa3aea3..0656f68 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -29,7 +29,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.PropertyInfo;
-import org.apache.hadoop.metrics2.sink.relocated.commons.lang.StringUtils;
+import org.apache.commons.lang.StringUtils;
 
 import com.google.common.collect.Sets;
 import com.google.inject.Inject;


[19/31] ambari git commit: AMBARI-21443. Start All service not getting invoked after regenerate keytabs (akovalenko)

Posted by ab...@apache.org.
AMBARI-21443. Start All service not getting invoked after regenerate keytabs (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0b397cdf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0b397cdf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0b397cdf

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 0b397cdff959e04947fd6b15ada7a7c6a06aa55b
Parents: 880853a
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Tue Jul 11 17:22:37 2017 +0300
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Tue Jul 11 19:39:22 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/controllers/main/service.js      | 13 +++++++++++-
 ambari-web/app/utils/ajax/ajax.js               | 22 ++++++++++++++++++++
 .../test/controllers/main/service_test.js       |  4 ++--
 3 files changed, 36 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0b397cdf/ambari-web/app/controllers/main/service.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service.js b/ambari-web/app/controllers/main/service.js
index eb9df0d..343105f 100644
--- a/ambari-web/app/controllers/main/service.js
+++ b/ambari-web/app/controllers/main/service.js
@@ -177,9 +177,20 @@ App.MainServiceController = Em.ArrayController.extend(App.SupportClientConfigsDo
   },
 
   /**
-   * Restart all services - stops all services, then starts them back
+   * Restart all services - restarts by sending one RESTART command
    */
   restartAllServices: function () {
+    App.ajax.send({
+      name: 'restart.allServices',
+      sender: this,
+      showLoadingPopup: true
+    });
+  },
+
+  /**
+   * Restart all services - stops all services, then starts them back
+   */
+  stopAndStartAllServices: function () {
     this.silentStopAllServices();
   },
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/0b397cdf/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index 5919091..d6e6dfa 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -2356,6 +2356,28 @@ var urls = {
     }
   },
 
+  'restart.allServices': {
+    'real': '/clusters/{clusterName}/requests',
+    'mock': '',
+    'format': function (data) {
+      return {
+        type: 'POST',
+        data: JSON.stringify({
+          "RequestInfo": {
+            "command": "RESTART",
+            "context": 'Restart all services',
+            "operation_level": 'host_component'
+          },
+          "Requests/resource_filters": [
+            {
+              "hosts_predicate": "HostRoles/cluster_name=" + data.clusterName
+            }
+          ]
+        })
+      }
+    }
+  },
+
   'restart.staleConfigs': {
     'real': "/clusters/{clusterName}/requests",
     'mock': "",
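
For reference, the request produced by the new 'restart.allServices' entry can be reproduced directly against the Ambari REST API. A minimal Python sketch follows; the server URL, credentials and cluster name are placeholders, and only the path and JSON body are taken from the definition above. The X-Requested-By header is the usual requirement for Ambari write requests.

import json
import requests

AMBARI_URL = "http://localhost:8080/api/v1"   # placeholder server location
CLUSTER = "c1"                                # placeholder cluster name

payload = {
    "RequestInfo": {
        "command": "RESTART",
        "context": "Restart all services",
        "operation_level": "host_component"
    },
    "Requests/resource_filters": [
        {"hosts_predicate": "HostRoles/cluster_name=" + CLUSTER}
    ]
}

response = requests.post(
    "{0}/clusters/{1}/requests".format(AMBARI_URL, CLUSTER),
    auth=("admin", "admin"),                  # placeholder credentials
    headers={"X-Requested-By": "ambari"},     # Ambari expects this header on write requests
    data=json.dumps(payload))
response.raise_for_status()
print(response.status_code)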

http://git-wip-us.apache.org/repos/asf/ambari/blob/0b397cdf/ambari-web/test/controllers/main/service_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service_test.js b/ambari-web/test/controllers/main/service_test.js
index 7ed7641..57a3eb4 100644
--- a/ambari-web/test/controllers/main/service_test.js
+++ b/ambari-web/test/controllers/main/service_test.js
@@ -433,7 +433,7 @@ describe('App.MainServiceController', function () {
 
   });
 
-  describe("#restartAllServices()", function() {
+  describe("#stopAndStartAllServices()", function() {
 
     beforeEach(function() {
       sinon.stub(mainServiceController, 'silentStopAllServices');
@@ -443,7 +443,7 @@ describe('App.MainServiceController', function () {
     });
 
     it("silentStopAllServices should be called", function() {
-      mainServiceController.restartAllServices();
+      mainServiceController.stopAndStartAllServices();
       expect(mainServiceController.silentStopAllServices.calledOnce).to.be.true;
     });
   });


[09/31] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by ab...@apache.org.
AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f33a250c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f33a250c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f33a250c

Branch: refs/heads/branch-feature-logsearch-ui
Commit: f33a250c0e7624b6cbc0a11ffce12506eaa95d9a
Parents: a795f38
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Jul 7 14:36:05 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Jul 7 23:00:23 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_features.py       |   13 +
 .../libraries/functions/stack_tools.py          |   39 +
 .../libraries/script/script.py                  |   19 +-
 .../server/api/query/JpaPredicateVisitor.java   |    8 +-
 .../controller/ActionExecutionContext.java      |   26 +
 .../controller/AmbariActionExecutionHelper.java |   26 +-
 .../BlueprintConfigurationProcessor.java        |   59 +-
 .../ClusterStackVersionResourceProvider.java    |  163 ++-
 .../ambari/server/state/ConfigHelper.java       |   32 +
 .../ambari/server/topology/AmbariContext.java   |   18 +
 .../server/upgrade/UpgradeCatalog252.java       |   61 +
 .../package/alerts/alert_hive_metastore.py      |   11 +-
 .../package/alerts/alert_llap_app_status.py     |   12 +-
 .../package/alerts/alert_check_oozie_server.py  |    8 +-
 .../resources/host_scripts/alert_disk_space.py  |   10 +-
 .../host_scripts/alert_version_select.py        |   16 +-
 .../HDP/2.0.6/configuration/cluster-env.xml     |   16 +-
 .../HDP/2.0.6/properties/stack_features.json    |  852 +++++------
 .../HDP/2.0.6/properties/stack_tools.json       |   16 +-
 .../PERF/1.0/configuration/cluster-env.xml      |   16 +-
 .../PERF/1.0/properties/stack_features.json     |   38 +-
 .../stacks/PERF/1.0/properties/stack_tools.json |   16 +-
 .../BlueprintConfigurationProcessorTest.java    |   41 +-
 ...ClusterStackVersionResourceProviderTest.java |    4 +-
 .../ClusterConfigurationRequestTest.java        |   60 +-
 .../common-services/configs/hawq_default.json   |    6 +-
 .../python/host_scripts/TestAlertDiskSpace.py   |   16 +-
 .../2.5/configs/ranger-admin-default.json       |  990 ++++++-------
 .../2.5/configs/ranger-admin-secured.json       | 1108 +++++++--------
 .../stacks/2.5/configs/ranger-kms-default.json  | 1158 +++++++--------
 .../stacks/2.5/configs/ranger-kms-secured.json  | 1320 +++++++++---------
 .../2.6/configs/ranger-admin-default.json       |  953 +++++++------
 .../2.6/configs/ranger-admin-secured.json       | 1066 +++++++-------
 .../src/test/python/stacks/utils/RMFTestCase.py |    8 +-
 34 files changed, 4353 insertions(+), 3852 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index cbd32e7..576c138 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -43,6 +43,12 @@ def check_stack_feature(stack_feature, stack_version):
 
   from resource_management.libraries.functions.default import default
   from resource_management.libraries.functions.version import compare_versions
+
+  stack_name = default("/hostLevelParams/stack_name", None)
+  if stack_name is None:
+    Logger.warning("Cannot find the stack name in the command. Stack features cannot be loaded")
+    return False
+
   stack_features_config = default("/configurations/cluster-env/stack_features", None)
 
   if not stack_version:
@@ -51,6 +57,13 @@ def check_stack_feature(stack_feature, stack_version):
 
   if stack_features_config:
     data = json.loads(stack_features_config)
+
+    if stack_name not in data:
+      Logger.warning("Cannot find stack features for the stack named {0}".format(stack_name))
+      return False
+
+    data = data[stack_name]
+
     for feature in data["stack_features"]:
       if feature["name"] == stack_feature:
         if "min_version" in feature:
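
For reference, a condensed sketch of the cluster-env stack_features shape this change expects: the JSON value is now keyed by stack name, and the lookup first selects the entry for the current stack. The feature name and versions below are illustrative, and feature_supported() is a simplified stand-in (plain string comparison instead of compare_versions), not the library function itself.

import json

# Illustrative stack_features value, now keyed by stack name.
stack_features_config = json.dumps({
    "HDP": {
        "stack_features": [
            {"name": "rolling_upgrade", "min_version": "2.2.0.0"}
        ]
    }
})

def feature_supported(stack_name, feature_name, stack_version):
    data = json.loads(stack_features_config)
    if stack_name not in data:
        return False                                    # unknown stack, as in the new warning path
    for feature in data[stack_name]["stack_features"]:
        if feature["name"] == feature_name and "min_version" in feature:
            # simplified: the real helper uses compare_versions()
            return stack_version >= feature["min_version"]
    return False

print(feature_supported("HDP", "rolling_upgrade", "2.6.0.0"))     # True
print(feature_supported("BIGTOP", "rolling_upgrade", "2.6.0.0"))  # False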

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
index 02ae62d..420ae11 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
@@ -39,15 +39,33 @@ def get_stack_tool(name):
   :return: tool_name, tool_path, tool_package
   """
   from resource_management.libraries.functions.default import default
+
+  stack_name = default("/hostLevelParams/stack_name", None)
+  if stack_name is None:
+    Logger.warning("Cannot find the stack name in the command. Stack tools cannot be loaded")
+    return (None, None, None)
+
   stack_tools = None
   stack_tools_config = default("/configurations/cluster-env/stack_tools", None)
   if stack_tools_config:
     stack_tools = json.loads(stack_tools_config)
 
+  if stack_tools is None:
+    Logger.warning("The stack tools could not be found in cluster-env")
+    return (None, None, None)
+
+  if stack_name not in stack_tools:
+    Logger.warning("Cannot find stack tools for the stack named {0}".format(stack_name))
+    return (None, None, None)
+
+  # load the stack tools keyed by the stack name
+  stack_tools = stack_tools[stack_name]
+
   if not stack_tools or not name or name.lower() not in stack_tools:
     Logger.warning("Cannot find config for {0} stack tool in {1}".format(str(name), str(stack_tools)))
     return (None, None, None)
 
+
   tool_config = stack_tools[name.lower()]
 
   # Return fixed length (tool_name, tool_path tool_package) tuple
@@ -81,3 +99,24 @@ def get_stack_tool_package(name):
   """
   (tool_name, tool_path, tool_package) = get_stack_tool(name)
   return tool_package
+
+
+def get_stack_root(stack_name, stack_root_json):
+  """
+  Get the stack-specific install root directory from the raw, JSON-escaped properties.
+  :param stack_name:
+  :param stack_root_json:
+  :return: stack_root
+  """
+  from resource_management.libraries.functions.default import default
+
+  if stack_root_json is None:
+    return "/usr/{0}".format(stack_name.lower())
+
+  stack_root = json.loads(stack_root_json)
+
+  if stack_name not in stack_root:
+    Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
+    return "/usr/{0}".format(stack_name.lower())
+
+  return stack_root[stack_name]
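
Similarly, stack_tools in cluster-env is now keyed by stack name, with each tool resolving to a fixed (tool_name, tool_path, tool_package) triple. A minimal sketch of the lookup; the HDP entries shown are illustrative, and lookup_stack_tool() is a simplified stand-in for get_stack_tool() without the Logger warnings.

import json

# Illustrative stack_tools value, now keyed by stack name; each entry is a
# fixed-length [tool_name, tool_path, tool_package] list.
stack_tools_config = json.dumps({
    "HDP": {
        "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
        "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
    }
})

def lookup_stack_tool(stack_name, name):
    stack_tools = json.loads(stack_tools_config)
    if stack_name not in stack_tools:
        return (None, None, None)              # unknown stack, as in the new warning path
    tools = stack_tools[stack_name]
    if not name or name.lower() not in tools:
        return (None, None, None)
    return tuple(tools[name.lower()][:3])

print(lookup_stack_tool("HDP", "stack_selector"))
# ('hdp-select', '/usr/bin/hdp-select', 'hdp-select')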

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 2c56a13..2b374c5 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -597,7 +597,11 @@ class Script(object):
     :return: a stack name or None
     """
     from resource_management.libraries.functions.default import default
-    return default("/hostLevelParams/stack_name", "HDP")
+    stack_name = default("/hostLevelParams/stack_name", None)
+    if stack_name is None:
+      stack_name = default("/configurations/cluster-env/stack_name", "HDP")
+
+    return stack_name
 
   @staticmethod
   def get_stack_root():
@@ -607,7 +611,18 @@ class Script(object):
     """
     from resource_management.libraries.functions.default import default
     stack_name = Script.get_stack_name()
-    return default("/configurations/cluster-env/stack_root", "/usr/{0}".format(stack_name.lower()))
+    stack_root_json = default("/configurations/cluster-env/stack_root", None)
+
+    if stack_root_json is None:
+      return "/usr/{0}".format(stack_name.lower())
+
+    stack_root = json.loads(stack_root_json)
+
+    if stack_name not in stack_root:
+      Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
+      return "/usr/{0}".format(stack_name.lower())
+
+    return stack_root[stack_name]
 
   @staticmethod
   def get_stack_version():
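
The same pattern applies to cluster-env stack_root, which Script.get_stack_root() now reads as a JSON map from stack name to installation root rather than a plain path, falling back to /usr/<stack> when the property is missing or does not mention the stack. A small sketch of that resolution; the /usr/hdp value is illustrative.

import json

def resolve_stack_root(stack_name, stack_root_json):
    # legacy clusters may still carry no stack_root at all
    if stack_root_json is None:
        return "/usr/{0}".format(stack_name.lower())
    stack_root = json.loads(stack_root_json)
    # an unknown stack name also falls back to the conventional location
    if stack_name not in stack_root:
        return "/usr/{0}".format(stack_name.lower())
    return stack_root[stack_name]

print(resolve_stack_root("HDP", json.dumps({"HDP": "/usr/hdp"})))  # /usr/hdp
print(resolve_stack_root("HDP", None))                             # /usr/hdp (fallback)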

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
index 984dc3b..84e9dd9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
@@ -63,11 +63,6 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
   final private CriteriaQuery<T> m_query;
 
   /**
-   * The entity class that the root of the query is built from.
-   */
-  final private Class<T> m_entityClass;
-
-  /**
    * The last calculated predicate.
    */
   private javax.persistence.criteria.Predicate m_lastPredicate = null;
@@ -92,7 +87,6 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
   public JpaPredicateVisitor(EntityManager entityManager, Class<T> entityClass) {
     m_entityManager = entityManager;
     m_builder = m_entityManager.getCriteriaBuilder();
-    m_entityClass = entityClass;
     m_query = m_builder.createQuery(entityClass);
     m_root = m_query.from(entityClass);
   }
@@ -178,7 +172,7 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
     }
 
     String operator = predicate.getOperator();
-    Comparable<?> value = predicate.getValue();
+    Comparable value = predicate.getValue();
 
     // convert string to enum for proper JPA comparisons
     if (lastSingularAttribute != null) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index 42a95c0..34d6db9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@ -27,6 +27,7 @@ import org.apache.ambari.server.actionmanager.TargetHostType;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.controller.internal.RequestOperationLevel;
 import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+import org.apache.ambari.server.state.StackId;
 
 /**
  * The context required to create tasks and stages for a custom action
@@ -43,6 +44,7 @@ public class ActionExecutionContext {
   private String expectedComponentName;
   private boolean hostsInMaintenanceModeExcluded = true;
   private boolean allowRetry = false;
+  private StackId stackId;
 
   private List<ExecutionCommandVisitor> m_visitors = new ArrayList<>();
 
@@ -173,6 +175,30 @@ public class ActionExecutionContext {
   }
 
   /**
+   * Gets the stack to use for generating stack-associated values for a command.
+   * In some cases the cluster's stack is not the correct one to use, such as
+   * when distributing a repository.
+   *
+   * @return the stack to use when generating stack-specific content for the
+   *         command.
+   */
+  public StackId getStackId() {
+    return stackId;
+  }
+
+  /**
+   * Sets the stack to use for generating stack-associated values for a command.
+   * In some cases the cluster's stack is not the correct one to use, such as
+   * when distributing a repository.
+   *
+   * @param stackId
+   *          the stackId to use for stack-based properties on the command.
+   */
+  public void setStackId(StackId stackId) {
+    this.stackId = stackId;
+  }
+
+  /**
    * Adds a command visitor that will be invoked after a command is created.  Provides access
    * to the command.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 8f522b0..391daa9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -465,7 +465,10 @@ public class AmbariActionExecutionHelper {
 
       if (StringUtils.isNotBlank(serviceName)) {
         Service service = cluster.getService(serviceName);
-        addRepoInfoToHostLevelParams(service.getDesiredRepositoryVersion(), hostLevelParams, hostName);
+        addRepoInfoToHostLevelParams(actionContext, service.getDesiredRepositoryVersion(),
+            hostLevelParams, hostName);
+      } else {
+        addRepoInfoToHostLevelParams(actionContext, null, hostLevelParams, hostName);
       }
 
 
@@ -529,9 +532,19 @@ public class AmbariActionExecutionHelper {
   *
   * */
 
-  private void addRepoInfoToHostLevelParams(RepositoryVersionEntity repositoryVersion,
-      Map<String, String> hostLevelParams, String hostName) throws AmbariException {
+  private void addRepoInfoToHostLevelParams(ActionExecutionContext actionContext,
+      RepositoryVersionEntity repositoryVersion, Map<String, String> hostLevelParams,
+      String hostName) throws AmbariException {
+
+    // if the repo is null, see if any values from the context should go on the
+    // host params and then return
     if (null == repositoryVersion) {
+      if (null != actionContext.getStackId()) {
+        StackId stackId = actionContext.getStackId();
+        hostLevelParams.put(STACK_NAME, stackId.getStackName());
+        hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
+      }
+
       return;
     }
 
@@ -557,7 +570,10 @@ public class AmbariActionExecutionHelper {
 
     hostLevelParams.put(REPO_INFO, rootJsonObject.toString());
 
-    hostLevelParams.put(STACK_NAME, repositoryVersion.getStackName());
-    hostLevelParams.put(STACK_VERSION, repositoryVersion.getStackVersion());
+    // set the host level params if not already set by whoever is creating this command
+    if (!hostLevelParams.containsKey(STACK_NAME) || !hostLevelParams.containsKey(STACK_VERSION)) {
+      hostLevelParams.put(STACK_NAME, repositoryVersion.getStackName());
+      hostLevelParams.put(STACK_VERSION, repositoryVersion.getStackVersion());
+    }
   }
 }

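The change above means that when no repository version is available, the stack name and version placed into the host-level parameters come from the stack explicitly set on the action context, and that values already supplied by the caller are never overwritten. A minimal sketch of that precedence; the names add_repo_info and context_stack are hypothetical and used only for illustration:

    def add_repo_info(host_level_params, repository_version=None, context_stack=None):
        """Populate stack_name / stack_version the way the patched helper does."""
        if repository_version is None:
            # no repository: fall back to the stack attached to the action context, if any
            if context_stack is not None:
                host_level_params["stack_name"] = context_stack[0]
                host_level_params["stack_version"] = context_stack[1]
            return host_level_params

        # a repository is present: only fill in the stack keys the caller has not set already
        if "stack_name" not in host_level_params or "stack_version" not in host_level_params:
            host_level_params["stack_name"] = repository_version[0]
            host_level_params["stack_version"] = repository_version[1]
        return host_level_params


    print(add_repo_info({}, context_stack=("HDP", "2.6")))
    # {'stack_name': 'HDP', 'stack_version': '2.6'}
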
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index e93b2f7..37284be 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -36,7 +36,9 @@ import java.util.regex.Pattern;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.apache.ambari.server.topology.AdvisedConfiguration;
 import org.apache.ambari.server.topology.Blueprint;
@@ -356,7 +358,7 @@ public class BlueprintConfigurationProcessor {
             final String originalValue = typeMap.get(propertyName);
             final String updatedValue =
               updater.updateForClusterCreate(propertyName, originalValue, clusterProps, clusterTopology);
-            
+
             if(updatedValue == null ) {
               continue;
             }
@@ -419,6 +421,7 @@ public class BlueprintConfigurationProcessor {
     }
 
     // Explicitly set any properties that are required but not currently provided in the stack definition.
+    setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
     setRetryConfiguration(clusterConfig, configTypesUpdated);
     setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
     addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
@@ -531,7 +534,7 @@ public class BlueprintConfigurationProcessor {
     try {
       String clusterName = clusterTopology.getAmbariContext().getClusterName(clusterTopology.getClusterId());
       Cluster cluster = clusterTopology.getAmbariContext().getController().getClusters().getCluster(clusterName);
-      authToLocalPerClusterMap = new HashMap<Long, Set<String>>();
+      authToLocalPerClusterMap = new HashMap<>();
       authToLocalPerClusterMap.put(Long.valueOf(clusterTopology.getClusterId()), clusterTopology.getAmbariContext().getController().getKerberosHelper().getKerberosDescriptor(cluster).getAllAuthToLocalProperties());
       } catch (AmbariException e) {
         LOG.error("Error while getting authToLocal properties. ", e);
@@ -2186,8 +2189,9 @@ public class BlueprintConfigurationProcessor {
       StringBuilder sb = new StringBuilder();
 
       Matcher m = REGEX_IN_BRACKETS.matcher(origValue);
-      if (m.matches())
+      if (m.matches()) {
         origValue = m.group("INNER");
+      }
 
       if (origValue != null) {
         sb.append("[");
@@ -2195,8 +2199,9 @@ public class BlueprintConfigurationProcessor {
         for (String value : origValue.split(",")) {
 
           m = REGEX_IN_QUOTES.matcher(value);
-          if (m.matches())
+          if (m.matches()) {
             value = m.group("INNER");
+          }
 
           if (!isFirst) {
             sb.append(",");
@@ -2230,6 +2235,7 @@ public class BlueprintConfigurationProcessor {
    */
   private static class OriginalValuePropertyUpdater implements PropertyUpdater {
 
+    @Override
     public String updateForClusterCreate(String propertyName,
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
@@ -2950,6 +2956,49 @@ public class BlueprintConfigurationProcessor {
 
 
   /**
+   * Sets the read-only properties for stack features & tools, overriding
+   * anything provided in the blueprint.
+   *
+   * @param configuration
+   *          the configuration to update with values from the stack.
+   * @param configTypesUpdated
+   *          the list of configuration types updated (cluster-env will be added
+   *          to this).
+   * @throws ConfigurationTopologyException
+   */
+  private void setStackToolsAndFeatures(Configuration configuration, Set<String> configTypesUpdated)
+      throws ConfigurationTopologyException {
+    ConfigHelper configHelper = clusterTopology.getAmbariContext().getConfigHelper();
+    Stack stack = clusterTopology.getBlueprint().getStack();
+    String stackName = stack.getName();
+    String stackVersion = stack.getVersion();
+
+    StackId stackId = new StackId(stackName, stackVersion);
+
+    Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_NAME_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY, ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
+
+    try {
+      Map<String, Map<String, String>> defaultStackProperties = configHelper.getDefaultStackProperties(stackId);
+      Map<String,String> clusterEnvDefaultProperties = defaultStackProperties.get(CLUSTER_ENV_CONFIG_TYPE_NAME);
+
+      for( String property : properties ){
+        if (clusterEnvDefaultProperties.containsKey(property)) {
+          configuration.setProperty(CLUSTER_ENV_CONFIG_TYPE_NAME, property,
+              clusterEnvDefaultProperties.get(property));
+
+          // make sure to include the configuration type as being updated
+          configTypesUpdated.add(CLUSTER_ENV_CONFIG_TYPE_NAME);
+        }
+      }
+    } catch( AmbariException ambariException ){
+      throw new ConfigurationTopologyException("Unable to retrieve the stack tools and features",
+          ambariException);
+    }
+  }
+
+  /**
    * Ensure that the specified property exists.
    * If not, set a default value.
    *
@@ -3099,7 +3148,7 @@ public class BlueprintConfigurationProcessor {
 
     @Override
     public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
-      return !(this.propertyConfigType.equals(configType) &&
+      return !(propertyConfigType.equals(configType) &&
              this.propertyName.equals(propertyName));
     }
   }

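setStackToolsAndFeatures() forces the read-only cluster-env properties (stack_name, stack_root, stack_tools, stack_features) back to the stack's own defaults, overriding whatever a blueprint supplied. A small sketch of that override pass over a nested configuration dict; the dict shape and the apply_stack_defaults name are illustrative, not Ambari API:

    READ_ONLY_PROPERTIES = ("stack_name", "stack_root", "stack_tools", "stack_features")

    def apply_stack_defaults(cluster_config, default_stack_properties, config_types_updated):
        """Overwrite the read-only cluster-env keys with the stack-provided defaults."""
        cluster_env_defaults = default_stack_properties.get("cluster-env", {})
        for name in READ_ONLY_PROPERTIES:
            if name in cluster_env_defaults:
                cluster_config.setdefault("cluster-env", {})[name] = cluster_env_defaults[name]
                # record that cluster-env was touched so it gets persisted
                config_types_updated.add("cluster-env")
        return cluster_config


    updated_types = set()
    config = {"cluster-env": {"stack_root": "/opt/custom"}}   # a blueprint tried to override this
    defaults = {"cluster-env": {"stack_name": "HDP", "stack_root": '{"HDP":"/usr/hdp"}'}}
    print(apply_stack_defaults(config, defaults, updated_types)["cluster-env"]["stack_root"])
    # {"HDP":"/usr/hdp"}
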
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 93c02be..c4fce8a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -67,11 +67,13 @@ import org.apache.ambari.server.security.authorization.RoleAuthorization;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.RepositoryVersionState;
-import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceOsSpecific;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
@@ -83,6 +85,7 @@ import org.apache.commons.lang.math.NumberUtils;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import com.google.gson.Gson;
 import com.google.inject.Inject;
 import com.google.inject.Provider;
 import com.google.inject.persist.Transactional;
@@ -171,12 +174,20 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   @Inject
   private static RepositoryVersionHelper repoVersionHelper;
 
-
+  @Inject
+  private static Gson gson;
 
   @Inject
   private static Provider<Clusters> clusters;
 
   /**
+   * Used for updating the existing stack tools with those of the stack being
+   * distributed.
+   */
+  @Inject
+  private static Provider<ConfigHelper> configHelperProvider;
+
+  /**
    * Constructor.
    */
   public ClusterStackVersionResourceProvider(
@@ -287,8 +298,6 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
     String clName;
     final String desiredRepoVersion;
-    String stackName;
-    String stackVersion;
 
     Map<String, Object> propertyMap = iterator.next();
 
@@ -327,30 +336,30 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
           cluster.getClusterName(), entity.getDirection().getText(false)));
     }
 
-    Set<StackId> stackIds = new HashSet<>();
-    if (propertyMap.containsKey(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID) &&
-            propertyMap.containsKey(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID)) {
-      stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
-      stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
-      StackId stackId = new StackId(stackName, stackVersion);
-      if (! ami.isSupportedStack(stackName, stackVersion)) {
-        throw new NoSuchParentResourceException(String.format("Stack %s is not supported",
-                stackId));
-      }
-      stackIds.add(stackId);
-    } else { // Using stack that is current for cluster
-      for (Service service : cluster.getServices().values()) {
-        stackIds.add(service.getDesiredStackId());
-      }
+    String stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
+    String stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+    if (StringUtils.isBlank(stackName) || StringUtils.isBlank(stackVersion)) {
+      String message = String.format(
+          "Both the %s and %s properties are required when distributing a new stack",
+          CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+
+      throw new SystemException(message);
     }
 
-    if (stackIds.size() > 1) {
-      throw new SystemException("Could not determine stack to add out of " + StringUtils.join(stackIds, ','));
+    StackId stackId = new StackId(stackName, stackVersion);
+
+    if (!ami.isSupportedStack(stackName, stackVersion)) {
+      throw new NoSuchParentResourceException(String.format("Stack %s is not supported", stackId));
     }
 
-    StackId stackId = stackIds.iterator().next();
-    stackName = stackId.getStackName();
-    stackVersion = stackId.getStackVersion();
+    // bootstrap the stack tools if necessary for the stack which is being
+    // distributed
+    try {
+      bootstrapStackTools(stackId, cluster);
+    } catch (AmbariException ambariException) {
+      throw new SystemException("Unable to modify stack tools for new stack being distributed",
+          ambariException);
+    }
 
     RepositoryVersionEntity repoVersionEntity = repositoryVersionDAO.findByStackAndVersion(
         stackId, desiredRepoVersion);
@@ -580,6 +589,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     }
 
     // determine packages for all services that are installed on host
+    List<ServiceOsSpecific.Package> packages = new ArrayList<>();
     Set<String> servicesOnHost = new HashSet<>();
     List<ServiceComponentHost> components = cluster.getServiceComponentHosts(host.getHostName());
     for (ServiceComponentHost component : components) {
@@ -600,16 +610,15 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     RequestResourceFilter filter = new RequestResourceFilter(null, null,
             Collections.singletonList(host.getHostName()));
 
-    ActionExecutionContext actionContext = new ActionExecutionContext(
-            cluster.getClusterName(), INSTALL_PACKAGES_ACTION,
-            Collections.singletonList(filter),
-            roleParams);
+    ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
+        INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), roleParams);
+
+    actionContext.setStackId(stackId);
     actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
 
     repoVersionHelper.addCommandRepository(actionContext, osFamily, repoVersion, repoInfo);
 
     return actionContext;
-
   }
 
 
@@ -698,4 +707,100 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
 
+  /**
+   * Ensures that the stack tools and stack features are set on
+   * {@link ConfigHelper#CLUSTER_ENV} for the stack of the repository being
+   * distributed. This step ensures that the new repository can be distributed
+   * with the correct tools.
+   * <p/>
+   * If the cluster's current stack name matches that of the new stack or the
+   * new stack's tools are already added in the configuration, then this method
+   * will not change anything.
+   *
+   * @param stackId
+   *          the stack of the repository being distributed (not {@code null}).
+   * @param cluster
+   *          the cluster the new stack/repo is being distributed for (not
+   *          {@code null}).
+   * @throws AmbariException
+   */
+  private void bootstrapStackTools(StackId stackId, Cluster cluster) throws AmbariException {
+    // if the stack name is the same as the cluster's current stack name, then
+    // there's no work to do
+    if (StringUtils.equals(stackId.getStackName(),
+        cluster.getCurrentStackVersion().getStackName())) {
+      return;
+    }
+
+    ConfigHelper configHelper = configHelperProvider.get();
+
+    // get the stack tools/features for the stack being distributed
+    Map<String, Map<String, String>> defaultStackConfigurationsByType = configHelper.getDefaultStackProperties(stackId);
+
+    Map<String, String> clusterEnvDefaults = defaultStackConfigurationsByType.get(
+        ConfigHelper.CLUSTER_ENV);
+
+    Config clusterEnv = cluster.getDesiredConfigByType(ConfigHelper.CLUSTER_ENV);
+    Map<String, String> clusterEnvProperties = clusterEnv.getProperties();
+
+    // the 3 properties we need to check and update
+    Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
+
+    // any updates are stored here and merged into the existing config type
+    Map<String, String> updatedProperties = new HashMap<>();
+
+    for (String property : properties) {
+      // determine if the property exists in the stack being distributed (it
+      // kind of has to, but we'll be safe if it's not found)
+      String newStackDefaultJson = clusterEnvDefaults.get(property);
+      if (StringUtils.isBlank(newStackDefaultJson)) {
+        continue;
+      }
+
+      String existingPropertyJson = clusterEnvProperties.get(property);
+
+      // if the stack tools/features property doesn't exist, then just set the
+      // one from the new stack
+      if (StringUtils.isBlank(existingPropertyJson)) {
+        updatedProperties.put(property, newStackDefaultJson);
+        continue;
+      }
+
+      // now is the hard part - we need to check whether the new stack's tools
+      // exist alongside the current tools and, if they don't, add the new
+      // tools in
+      final Map<String, Object> existingJson;
+      final Map<String, ?> newStackJsonAsObject;
+      if (StringUtils.equals(property, ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY)) {
+        existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson, Map.class);
+        newStackJsonAsObject = gson.<Map<String, String>> fromJson(newStackDefaultJson, Map.class);
+      } else {
+        existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson,
+            Map.class);
+
+        newStackJsonAsObject = gson.<Map<String, Map<Object, Object>>> fromJson(newStackDefaultJson,
+            Map.class);
+      }
+
+      if (existingJson.keySet().contains(stackId.getStackName())) {
+        continue;
+      }
+
+      existingJson.put(stackId.getStackName(), newStackJsonAsObject.get(stackId.getStackName()));
+
+      String newJson = gson.toJson(existingJson);
+      updatedProperties.put(property, newJson);
+    }
+
+    if (!updatedProperties.isEmpty()) {
+      AmbariManagementController amc = getManagementController();
+      String serviceNote = String.format(
+          "Adding stack tools for %s while distributing a new repository", stackId.toString());
+
+      configHelper.updateConfigType(cluster, stackId, amc, clusterEnv.getType(), updatedProperties,
+          null, amc.getAuthName(), serviceNote);
+    }
+  }
 }

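bootstrapStackTools() is the heart of the co-existence change: before a repository for a different stack is distributed, the new stack's stack_root, stack_tools and stack_features defaults are merged into the existing JSON-valued cluster-env properties, keyed by stack name, and nothing is touched if the entry is already present. A sketch of that merge; the "XYZ" stack and /usr/xyz path are hypothetical examples, not values from the patch:

    import json

    def merge_stack_entry(existing_json_str, new_default_json_str, stack_name):
        """Add the new stack's entry to an existing JSON property if it is missing."""
        if not existing_json_str:
            # property not set yet: just take the new stack's default as-is
            return new_default_json_str

        existing = json.loads(existing_json_str)
        if stack_name in existing:
            # already bootstrapped for this stack: leave the property untouched
            return existing_json_str

        new_defaults = json.loads(new_default_json_str)
        existing[stack_name] = new_defaults.get(stack_name)
        return json.dumps(existing)


    current_root = '{"HDP": "/usr/hdp"}'
    incoming_root = '{"XYZ": "/usr/xyz"}'
    print(merge_stack_entry(current_root, incoming_root, "XYZ"))
    # e.g. {"HDP": "/usr/hdp", "XYZ": "/usr/xyz"} (key order may vary)
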
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 9f75bf9..a3a676d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -88,8 +88,10 @@ public class ConfigHelper {
   public static final String CLUSTER_ENV_RETRY_COMMANDS = "commands_to_retry";
   public static final String CLUSTER_ENV_RETRY_MAX_TIME_IN_SEC = "command_retry_max_time_in_sec";
   public static final String COMMAND_RETRY_MAX_TIME_IN_SEC_DEFAULT = "600";
+  public static final String CLUSTER_ENV_STACK_NAME_PROPERTY = "stack_name";
   public static final String CLUSTER_ENV_STACK_FEATURES_PROPERTY = "stack_features";
   public static final String CLUSTER_ENV_STACK_TOOLS_PROPERTY = "stack_tools";
+  public static final String CLUSTER_ENV_STACK_ROOT_PROPERTY = "stack_root";
 
   public static final String HTTP_ONLY = "HTTP_ONLY";
   public static final String HTTPS_ONLY = "HTTPS_ONLY";
@@ -1148,6 +1150,36 @@ public class ConfigHelper {
    *
    * @param stack
    *          the stack to pull stack-values from (not {@code null})
+   * @return a mapping of configuration type to map of key/value pairs for the
+   *         default configurations.
+   * @throws AmbariException
+   */
+  public Map<String, Map<String, String>> getDefaultStackProperties(StackId stack)
+      throws AmbariException {
+    Map<String, Map<String, String>> defaultPropertiesByType = new HashMap<>();
+
+    // populate the stack (non-service related) properties
+    Set<org.apache.ambari.server.state.PropertyInfo> stackConfigurationProperties = ambariMetaInfo.getStackProperties(
+        stack.getStackName(), stack.getStackVersion());
+
+    for (PropertyInfo stackDefaultProperty : stackConfigurationProperties) {
+      String type = ConfigHelper.fileNameToConfigType(stackDefaultProperty.getFilename());
+
+      if (!defaultPropertiesByType.containsKey(type)) {
+        defaultPropertiesByType.put(type, new HashMap<String, String>());
+      }
+
+      defaultPropertiesByType.get(type).put(stackDefaultProperty.getName(),
+          stackDefaultProperty.getValue());
+    }
+
+    return defaultPropertiesByType;
+  }
+
+  /**
+   *
+   * @param stack
+   *          the stack to pull stack-values from (not {@code null})
    * @param serviceName
    *          the service name {@code null}).
    * @return a mapping of configuration type to map of key/value pairs for the

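getDefaultStackProperties() simply groups the stack-level default properties by configuration type, where the type is derived from each property's defining file name (cluster-env.xml becomes cluster-env). A sketch of the same grouping over plain dicts; the dict-based property representation is illustrative:

    def group_defaults_by_type(stack_properties):
        """Group stack default properties by the config type derived from their filename."""
        defaults_by_type = {}
        for prop in stack_properties:
            config_type = prop["filename"].rsplit(".", 1)[0]   # "cluster-env.xml" -> "cluster-env"
            defaults_by_type.setdefault(config_type, {})[prop["name"]] = prop["value"]
        return defaults_by_type


    props = [
        {"filename": "cluster-env.xml", "name": "stack_name", "value": "HDP"},
        {"filename": "cluster-env.xml", "name": "stack_root", "value": '{"HDP":"/usr/hdp"}'},
    ]
    print(group_defaults_by_type(props)["cluster-env"]["stack_name"])  # HDP
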
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 0467b9b..9b64edc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -69,6 +69,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityType;
@@ -80,6 +81,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
+import com.google.inject.Provider;
 
 
 /**
@@ -100,6 +102,12 @@ public class AmbariContext {
   @Inject
   ConfigFactory configFactory;
 
+  /**
+   * Used for getting configuration property values from stack and services.
+   */
+  @Inject
+  private Provider<ConfigHelper> configHelper;
+
   private static AmbariManagementController controller;
   private static ClusterController clusterController;
   //todo: task id's.  Use existing mechanism for getting next task id sequence
@@ -674,6 +682,16 @@ public class AmbariContext {
     return String.format("%s:%s", bpName, hostGroupName);
   }
 
+  /**
+   * Gets an instance of {@link ConfigHelper} for classes which are not
+   * dependency injected.
+   *
+   * @return a {@link ConfigHelper} instance.
+   */
+  public ConfigHelper getConfigHelper() {
+    return configHelper.get();
+  }
+
   private synchronized HostResourceProvider getHostResourceProvider() {
     if (hostResourceProvider == null) {
       hostResourceProvider = (HostResourceProvider)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index 74f8f35..fa3aea3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -18,10 +18,20 @@
 package org.apache.ambari.server.upgrade;
 
 import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.commons.lang.StringUtils;
 
+import com.google.common.collect.Sets;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 
@@ -33,6 +43,8 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
   static final String CLUSTERCONFIG_TABLE = "clusterconfig";
   static final String SERVICE_DELETED_COLUMN = "service_deleted";
 
+  private static final String CLUSTER_ENV = "cluster-env";
+
   /**
    * Constructor.
    *
@@ -79,6 +91,7 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
    */
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
+    resetStackToolsAndFeatures();
   }
 
   /**
@@ -91,4 +104,52 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
     dbAccessor.addColumn(CLUSTERCONFIG_TABLE,
         new DBColumnInfo(SERVICE_DELETED_COLUMN, Short.class, null, 0, false));
   }
+
+  /**
+   * Resets the following properties in {@code cluster-env} to their new
+   * defaults:
+   * <ul>
+   * <li>stack_root
+   * <li>stack_tools
+   * <li>stack_features
+   * </ul>
+   *
+   * @throws AmbariException
+   */
+  private void resetStackToolsAndFeatures() throws AmbariException {
+    Set<String> propertiesToReset = Sets.newHashSet("stack_tools", "stack_features", "stack_root");
+
+    Clusters clusters = injector.getInstance(Clusters.class);
+    ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
+
+    Map<String, Cluster> clusterMap = clusters.getClusters();
+    for (Cluster cluster : clusterMap.values()) {
+      Config clusterEnv = cluster.getDesiredConfigByType(CLUSTER_ENV);
+      if (null == clusterEnv) {
+        continue;
+      }
+
+      Map<String, String> newStackProperties = new HashMap<>();
+      Set<PropertyInfo> stackProperties = configHelper.getStackProperties(cluster);
+      if (null == stackProperties) {
+        continue;
+      }
+
+      for (PropertyInfo propertyInfo : stackProperties) {
+        String fileName = propertyInfo.getFilename();
+        if (StringUtils.isEmpty(fileName)) {
+          continue;
+        }
+
+        if (StringUtils.equals(ConfigHelper.fileNameToConfigType(fileName), CLUSTER_ENV)) {
+          String stackPropertyName = propertyInfo.getName();
+          if (propertiesToReset.contains(stackPropertyName)) {
+            newStackProperties.put(stackPropertyName, propertyInfo.getValue());
+          }
+        }
+      }
+
+      updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, newStackProperties, true, false);
+    }
+  }
 }

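resetStackToolsAndFeatures() makes the 2.5.2 upgrade rewrite the three affected cluster-env properties with the new stack defaults, which are now JSON objects keyed by stack name rather than flat values. A before/after sketch using the default values visible elsewhere in this patch; the stack_features value is trimmed for brevity:

    import json

    NEW_DEFAULTS = {
        "stack_root": '{"HDP":"/usr/hdp"}',
        "stack_tools": '{"HDP": {"stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], '
                       '"conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]}}',
        "stack_features": '{"HDP": {"stack_features": []}}',  # trimmed for brevity
    }

    def reset_cluster_env(cluster_env_properties):
        """Overwrite only the three reset properties, leaving everything else alone."""
        updated = dict(cluster_env_properties)
        for name in ("stack_root", "stack_tools", "stack_features"):
            updated[name] = NEW_DEFAULTS[name]
        return updated


    old = {"stack_root": "/usr/hdp", "security_enabled": "true"}
    new = reset_cluster_env(old)
    print(json.loads(new["stack_root"])["HDP"])   # /usr/hdp
    print(new["security_enabled"])                # true (untouched)
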
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
index 32df7d3..5b4fd68 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
@@ -27,6 +27,7 @@ import logging
 from resource_management.core import global_lock
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import stack_tools
 from resource_management.core.resources import Execute
 from resource_management.core.signal_utils import TerminateStrategy
 from ambari_commons.os_check import OSConst
@@ -56,6 +57,7 @@ SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
 SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
 SMOKEUSER_DEFAULT = 'ambari-qa'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 
 HIVE_CONF_DIR_LEGACY = '/etc/hive/conf.server'
@@ -78,7 +80,7 @@ def get_tokens():
   """
   return (SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,
     HIVE_METASTORE_URIS_KEY, SMOKEUSER_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
-    STACK_ROOT)
+    STACK_NAME, STACK_ROOT)
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def get_tokens():
@@ -175,9 +177,10 @@ def execute(configurations={}, parameters={}, host_name=None):
     bin_dir = HIVE_BIN_DIR_LEGACY
 
 
-    if STACK_ROOT in configurations:
-      hive_conf_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/conf")
-      hive_bin_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/bin")
+    if STACK_NAME in configurations and STACK_ROOT in configurations:
+      stack_root = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
+      hive_conf_dir = stack_root + format("/current/hive-metastore/conf")
+      hive_bin_dir = stack_root + format("/current/hive-metastore/bin")
 
       if os.path.exists(hive_conf_dir):
         conf_dir = hive_conf_dir

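The alert scripts above and below now pass both the stack name and the JSON-valued stack_root through stack_tools.get_stack_root() instead of using the raw property. A hedged sketch of what such a lookup plausibly does under the new format; the fallback to a flat value is an assumption about the helper, not code taken from this patch:

    import json

    def get_stack_root_sketch(stack_name, stack_root_value):
        """Resolve a stack's root directory from the JSON-valued stack_root property."""
        try:
            roots = json.loads(stack_root_value)
            return roots[stack_name]
        except (ValueError, KeyError, TypeError):
            # assumed fallback: treat the value as the old flat format, e.g. "/usr/hdp"
            return stack_root_value


    print(get_stack_root_sketch("HDP", '{"HDP": "/usr/hdp"}'))  # /usr/hdp
    print(get_stack_root_sketch("HDP", "/usr/hdp"))             # /usr/hdp
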
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
index 98d1899..e46c896 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
@@ -26,7 +26,7 @@ import subprocess
 
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
-from ambari_commons.os_check import OSConst
+from resource_management.libraries.functions import stack_tools
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from resource_management.core import shell
 from resource_management.core.resources import Execute
@@ -58,6 +58,7 @@ HIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
 HIVE_USER_KEY = '{{hive-env/hive_user}}'
 HIVE_USER_DEFAULT = 'default.smoke.user'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 STACK_ROOT_DEFAULT = Script.get_stack_root()
 
@@ -88,7 +89,7 @@ def get_tokens():
   to build the dictionary passed into execute
   """
   return (SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_PRINCIPAL_KEY, HIVE_PRINCIPAL_KEYTAB_KEY,
-          HIVE_USER_KEY, STACK_ROOT, LLAP_APP_NAME_KEY)
+          HIVE_USER_KEY, STACK_NAME, STACK_ROOT, LLAP_APP_NAME_KEY)
 
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -159,8 +160,11 @@ def execute(configurations={}, parameters={}, host_name=None):
 
 
     start_time = time.time()
-    if STACK_ROOT in configurations:
-      llap_status_cmd = configurations[STACK_ROOT] + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
+    if STACK_NAME in configurations and STACK_ROOT in configurations:
+      stack_root = stack_tools.get_stack_root(configurations[STACK_NAME],
+        configurations[STACK_ROOT])
+
+      llap_status_cmd = stack_root + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
     else:
       llap_status_cmd = STACK_ROOT_DEFAULT + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
index 0e9fe74..54eef18 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
@@ -26,6 +26,7 @@ from resource_management.core.resources import Execute
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions import get_klist_path
+from resource_management.libraries.functions import stack_tools
 from ambari_commons.os_check import OSConst, OSCheck
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from urlparse import urlparse
@@ -66,6 +67,7 @@ USER_PRINCIPAL_DEFAULT = 'oozie@EXAMPLE.COM'
 # default user
 USER_DEFAULT = 'oozie'
 
+STACK_NAME_KEY = '{{cluster-env/stack_name}}'
 STACK_ROOT_KEY = '{{cluster-env/stack_root}}'
 STACK_ROOT_DEFAULT = '/usr/hdp'
 
@@ -86,7 +88,7 @@ def get_tokens():
   to build the dictionary passed into execute
   """
   return (OOZIE_URL_KEY, USER_PRINCIPAL_KEY, SECURITY_ENABLED, USER_KEYTAB_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
-          USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_ROOT_KEY)
+          USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_NAME_KEY, STACK_ROOT_KEY)
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def get_check_command(oozie_url, host_name, configurations):
@@ -158,8 +160,8 @@ def get_check_command(oozie_url, host_name, configurations, parameters, only_kin
 
   # Configure stack root
   stack_root = STACK_ROOT_DEFAULT
-  if STACK_ROOT_KEY in configurations:
-    stack_root = configurations[STACK_ROOT_KEY].lower()
+  if STACK_NAME_KEY in configurations and STACK_ROOT_KEY in configurations:
+    stack_root = stack_tools.get_stack_root(configurations[STACK_NAME_KEY], configurations[STACK_ROOT_KEY]).lower()
 
   # oozie configuration directory using a symlink
   oozie_config_directory = OOZIE_CONF_DIR.replace(STACK_ROOT_PATTERN, stack_root)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
index 4c5834f..f3c6406 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
@@ -23,6 +23,7 @@ import os
 import platform
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
+from resource_management.libraries.functions import stack_tools
 
 DiskInfo = collections.namedtuple('DiskInfo', 'total used free path')
 
@@ -36,6 +37,7 @@ MIN_FREE_SPACE_DEFAULT = 5000000000L
 PERCENT_USED_WARNING_DEFAULT = 50
 PERCENT_USED_CRITICAL_DEFAULT = 80
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 
 def get_tokens():
@@ -43,7 +45,7 @@ def get_tokens():
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
   """
-  return (STACK_ROOT, )
+  return (STACK_NAME, STACK_ROOT)
 
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -64,10 +66,10 @@ def execute(configurations={}, parameters={}, host_name=None):
   if configurations is None:
     return (('UNKNOWN', ['There were no configurations supplied to the script.']))
 
-  if not STACK_ROOT in configurations:
-    return (('STACK_ROOT', ['cluster-env/stack_root is not specified']))
+  if not STACK_NAME in configurations or not STACK_ROOT in configurations:
+    return (('STACK_ROOT', ['cluster-env/stack_name and cluster-env/stack_root are required']))
 
-  path = configurations[STACK_ROOT]
+  path = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
 
   try:
     disk_usage = _get_disk_usage(path)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/host_scripts/alert_version_select.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_version_select.py b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
index 0ce79e7..f54ccad 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_version_select.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
@@ -31,6 +31,7 @@ RESULT_STATE_WARNING = 'WARNING'
 RESULT_STATE_CRITICAL = 'CRITICAL'
 RESULT_STATE_UNKNOWN = 'UNKNOWN'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_TOOLS = '{{cluster-env/stack_tools}}'
 
 
@@ -42,7 +43,7 @@ def get_tokens():
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
   """
-  return (STACK_TOOLS,)
+  return (STACK_NAME, STACK_TOOLS)
 
 
 def execute(configurations={}, parameters={}, host_name=None):
@@ -65,8 +66,10 @@ def execute(configurations={}, parameters={}, host_name=None):
     if STACK_TOOLS not in configurations:
       return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(STACK_TOOLS)])
 
+    stack_name = Script.get_stack_name()
+
     # Of the form,
-    # { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] }
+    # { "HDP" : { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] } }
     stack_tools_str = configurations[STACK_TOOLS]
 
     if stack_tools_str is None:
@@ -75,6 +78,7 @@ def execute(configurations={}, parameters={}, host_name=None):
     distro_select = "unknown-distro-select"
     try:
       stack_tools = json.loads(stack_tools_str)
+      stack_tools = stack_tools[stack_name]
       distro_select = stack_tools["stack_selector"][0]
     except:
       pass
@@ -87,18 +91,18 @@ def execute(configurations={}, parameters={}, host_name=None):
       (code, out, versions) = unsafe_get_stack_versions()
 
       if code == 0:
-        msg.append("Ok. {0}".format(distro_select))
+        msg.append("{0} ".format(distro_select))
         if versions is not None and type(versions) is list and len(versions) > 0:
-          msg.append("Versions: {0}".format(", ".join(versions)))
+          msg.append("reported the following versions: {0}".format(", ".join(versions)))
         return (RESULT_STATE_OK, ["\n".join(msg)])
       else:
-        msg.append("Failed, check dir {0} for unexpected contents.".format(stack_root_dir))
+        msg.append("{0} could not properly read {1}. Check this directory for unexpected contents.".format(distro_select, stack_root_dir))
         if out is not None:
           msg.append(out)
 
         return (RESULT_STATE_CRITICAL, ["\n".join(msg)])
     else:
-      msg.append("Ok. No stack root {0} to check.".format(stack_root_dir))
+      msg.append("No stack root {0} to check.".format(stack_root_dir))
       return (RESULT_STATE_OK, ["\n".join(msg)])
   except Exception, e:
     return (RESULT_STATE_CRITICAL, [e.message])

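With stack_tools now keyed by stack name, the version-select alert has to index into its own stack's entry before reading the stack_selector, exactly as the updated inline comment shows. The lookup in isolation:

    import json

    # value format taken from the comment in the hunk above
    stack_tools_str = ('{ "HDP" : { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], '
                       '"conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] } }')

    stack_tools = json.loads(stack_tools_str)
    stack_tools = stack_tools["HDP"]              # the extra lookup introduced by this change
    distro_select = stack_tools["stack_selector"][0]
    print(distro_select)                          # hdp-select
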
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index f7d5de5..e6ec285 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -220,6 +220,18 @@ gpgcheck=0</value>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
+  <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_name</name>
+    <value>HDP</value>
+    <description>The name of the stack.</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
   <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
   <property>
     <name>stack_tools</name>
@@ -252,8 +264,8 @@ gpgcheck=0</value>
   </property>
   <property>
     <name>stack_root</name>
-    <value>/usr/hdp</value>
-    <description>Stack root folder</description>
+    <value>{"HDP":"/usr/hdp"}</value>
+    <description>JSON which defines the stack root by stack name</description>
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>

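cluster-env now ships a read-only stack_name property and a stack_root whose value is a JSON map of stack name to root directory, which is what allows several stacks' roots to be recorded side by side. A short sketch of reading the pair and adding a second, hypothetical stack without disturbing the first:

    import json

    cluster_env = {
        "stack_name": "HDP",
        "stack_root": '{"HDP":"/usr/hdp"}',   # default value from the hunk above
    }

    roots = json.loads(cluster_env["stack_root"])
    print(roots[cluster_env["stack_name"]])   # /usr/hdp

    roots["XYZ"] = "/usr/xyz"                 # hypothetical second stack co-existing
    cluster_env["stack_root"] = json.dumps(roots)
    print(cluster_env["stack_root"])          # both roots are now recorded
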
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index 878645b..31cf0c8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -1,427 +1,429 @@
 {
-  "stack_features": [
-    {
-      "name": "snappy",
-      "description": "Snappy compressor/decompressor support",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
-    },
-    {
-      "name": "lzo",
-      "description": "LZO libraries support",
-      "min_version": "2.2.1.0"
-    },
-    {
-      "name": "express_upgrade",
-      "description": "Express upgrade support",
-      "min_version": "2.1.0.0"
-    },
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "kafka_acl_migration_support",
-      "description": "ACL migration support",
-      "min_version": "2.3.4.0"
-    },
-    {
-      "name": "secure_zookeeper",
-      "description": "Protect ZNodes with SASL acl in secure clusters",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "datanode_non_root",
-      "description": "DataNode running as non-root support (AMBARI-7615)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "remove_ranger_hdfs_plugin_env",
-      "description": "HDFS removes Ranger env files (AMBARI-14299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger",
-      "description": "Ranger Service support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_tagsync_component",
-      "description": "Ranger Tagsync component support (AMBARI-14383)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix",
-      "description": "Phoenix Service support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "nfs",
-      "description": "NFS support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "tez_for_spark",
-      "description": "Tez dependency for Spark",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "timeline_state_store",
-      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "copy_tarball_to_hdfs",
-      "description": "Copy tarball to HDFS support (AMBARI-12113)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "spark_16plus",
-      "description": "Spark 1.6+",
-      "min_version": "2.4.0.0"
-    },
-    {
-      "name": "spark_thriftserver",
-      "description": "Spark Thrift Server",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "storm_kerberos",
-      "description": "Storm Kerberos support (AMBARI-7570)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "storm_ams",
-      "description": "Storm AMS integration (AMBARI-10710)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "create_kafka_broker_id",
-      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_listeners",
-      "description": "Kafka listeners (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_kerberos",
-      "description": "Kafka Kerberos support (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "pig_on_tez",
-      "description": "Pig on Tez support (AMBARI-7863)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_usersync_non_root",
-      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_audit_db_support",
-      "description": "Ranger Audit to DB support",
-      "min_version": "2.2.0.0",
-      "max_version": "2.4.99.99"
-    },
-    {
-      "name": "accumulo_kerberos_user_auth",
-      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "knox_versioned_data_dir",
-      "description": "Use versioned data dir for Knox (AMBARI-13164)",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "knox_sso_topology",
-      "description": "Knox SSO Topology support (AMBARI-13975)",
-      "min_version": "2.3.8.0"
-    },
-    {
-      "name": "atlas_rolling_upgrade",
-      "description": "Rolling upgrade support for Atlas",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "oozie_admin_user",
-      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_create_hive_tez_configs",
-      "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_setup_shared_lib",
-      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_host_kerberos",
-      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-      "min_version": "2.0.0.0"
-    },
-    {
-      "name": "falcon_extensions",
-      "description": "Falcon Extension",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_upgrade_schema",
-      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server_interactive",
-      "description": "Hive server interactive support (AMBARI-15573)",
-      "min_version": "2.5.0.0"
-     },
-    {
-      "name": "hive_webhcat_specific_configs",
-      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_purge_table",
-      "description": "Hive purge table support (AMBARI-12260)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server2_kerberized_env",
-      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
-      "min_version": "2.2.3.0",
-      "max_version": "2.2.5.0"
-     },
-    {
-      "name": "hive_env_heapsize",
-      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_kms_hsm_support",
-      "description": "Ranger KMS HSM support (AMBARI-15752)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_log4j_support",
-      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kerberos_support",
-      "description": "Ranger Kerberos support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_site_support",
-      "description": "Hive Metastore site support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_usersync_password_jceks",
-      "description": "Saving Ranger Usersync credentials in jceks",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_install_infra_client",
-      "description": "Ambari Infra Service support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "falcon_atlas_support_2_3",
-      "description": "Falcon Atlas integration support for 2.3 stack",
-      "min_version": "2.3.99.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "falcon_atlas_support",
-      "description": "Falcon Atlas integration",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hbase_home_directory",
-      "description": "Hbase home directory in HDFS needed for HBASE backup",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_livy",
-      "description": "Livy as slave component of spark",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_livy2",
-      "description": "Livy as slave component of spark",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "atlas_ranger_plugin_support",
-      "description": "Atlas Ranger plugin support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_conf_dir_in_path",
-      "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
-      "min_version": "2.3.0.0",
-      "max_version": "2.4.99.99"
-    },
-    {
-      "name": "atlas_upgrade_support",
-      "description": "Atlas supports express and rolling upgrades",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_hook_support",
-      "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_pid_support",
-      "description": "Ranger Service support pid generation AMBARI-16756",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kms_pid_support",
-      "description": "Ranger KMS Service support pid generation",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_admin_password_change",
-      "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_setup_db_on_start",
-      "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "storm_metrics_apache_classes",
-      "description": "Metrics sink for Storm that uses Apache class names",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_java_opts_support",
-      "description": "Allow Spark to generate java-opts file",
-      "min_version": "2.2.0.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "atlas_hbase_setup",
-      "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_hive_plugin_jdbc_url",
-      "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "zkfc_version_advertised",
-      "description": "ZKFC advertise version",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix_core_hdfs_site_required",
-      "description": "HDFS and CORE site required for Phoenix",
-      "max_version": "2.5.9.9"
-    },
-    {
-      "name": "ranger_tagsync_ssl_xml_support",
-      "description": "Ranger Tagsync ssl xml support.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "ranger_xml_configuration",
-      "description": "Ranger code base support xml configurations",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_ranger_plugin_support",
-      "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "yarn_ranger_plugin_support",
-      "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_solr_config_support",
-      "description": "Showing Ranger solrconfig.xml on UI",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "hive_interactive_atlas_hook_required",
-      "description": "Registering Atlas Hook for Hive Interactive.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "core_site_for_ranger_plugins",
-      "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "atlas_install_hook_package_support",
-      "description": "Stop installing packages from 2.6",
-      "max_version": "2.5.9.9"
-    },
-    {
-      "name": "atlas_hdfs_site_on_namenode_ha",
-      "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "hive_interactive_ga",
-      "description": "Hive Interactive GA support",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "secure_ranger_ssl_password",
-      "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "ranger_kms_ssl",
-      "description": "Ranger KMS SSL properties in ambari stack",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "nifi_encrypt_config",
-      "description": "Encrypt sensitive properties written to nifi property file",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "toolkit_config_update",
-      "description": "Support separate input and output for toolkit configuration",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "admin_toolkit_support",
-      "description": "Supports the nifi admin toolkit",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "tls_toolkit_san",
-      "description": "Support subject alternative name flag",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "nifi_jaas_conf_create",
-      "description": "Create NIFI jaas configuration when kerberos is enabled",
-      "min_version": "2.6.0.0"
-    }
-  ]
+  "HDP": {
+    "stack_features": [
+      {
+        "name": "snappy",
+        "description": "Snappy compressor/decompressor support",
+        "min_version": "2.0.0.0",
+        "max_version": "2.2.0.0"
+      },
+      {
+        "name": "lzo",
+        "description": "LZO libraries support",
+        "min_version": "2.2.1.0"
+      },
+      {
+        "name": "express_upgrade",
+        "description": "Express upgrade support",
+        "min_version": "2.1.0.0"
+      },
+      {
+        "name": "rolling_upgrade",
+        "description": "Rolling upgrade support",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "kafka_acl_migration_support",
+        "description": "ACL migration support",
+        "min_version": "2.3.4.0"
+      },
+      {
+        "name": "secure_zookeeper",
+        "description": "Protect ZNodes with SASL acl in secure clusters",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "config_versioning",
+        "description": "Configurable versions support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "datanode_non_root",
+        "description": "DataNode running as non-root support (AMBARI-7615)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "remove_ranger_hdfs_plugin_env",
+        "description": "HDFS removes Ranger env files (AMBARI-14299)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger",
+        "description": "Ranger Service support",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_tagsync_component",
+        "description": "Ranger Tagsync component support (AMBARI-14383)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "phoenix",
+        "description": "Phoenix Service support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "nfs",
+        "description": "NFS support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "tez_for_spark",
+        "description": "Tez dependency for Spark",
+        "min_version": "2.2.0.0",
+        "max_version": "2.3.0.0"
+      },
+      {
+        "name": "timeline_state_store",
+        "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "copy_tarball_to_hdfs",
+        "description": "Copy tarball to HDFS support (AMBARI-12113)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "spark_16plus",
+        "description": "Spark 1.6+",
+        "min_version": "2.4.0.0"
+      },
+      {
+        "name": "spark_thriftserver",
+        "description": "Spark Thrift Server",
+        "min_version": "2.3.2.0"
+      },
+      {
+        "name": "storm_kerberos",
+        "description": "Storm Kerberos support (AMBARI-7570)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "storm_ams",
+        "description": "Storm AMS integration (AMBARI-10710)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "create_kafka_broker_id",
+        "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+        "min_version": "2.2.0.0",
+        "max_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_listeners",
+        "description": "Kafka listeners (AMBARI-10984)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_kerberos",
+        "description": "Kafka Kerberos support (AMBARI-10984)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "pig_on_tez",
+        "description": "Pig on Tez support (AMBARI-7863)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_usersync_non_root",
+        "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger_audit_db_support",
+        "description": "Ranger Audit to DB support",
+        "min_version": "2.2.0.0",
+        "max_version": "2.4.99.99"
+      },
+      {
+        "name": "accumulo_kerberos_user_auth",
+        "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "knox_versioned_data_dir",
+        "description": "Use versioned data dir for Knox (AMBARI-13164)",
+        "min_version": "2.3.2.0"
+      },
+      {
+        "name": "knox_sso_topology",
+        "description": "Knox SSO Topology support (AMBARI-13975)",
+        "min_version": "2.3.8.0"
+      },
+      {
+        "name": "atlas_rolling_upgrade",
+        "description": "Rolling upgrade support for Atlas",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "oozie_admin_user",
+        "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_create_hive_tez_configs",
+        "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_setup_shared_lib",
+        "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_host_kerberos",
+        "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+        "min_version": "2.0.0.0"
+      },
+      {
+        "name": "falcon_extensions",
+        "description": "Falcon Extension",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_metastore_upgrade_schema",
+        "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_server_interactive",
+        "description": "Hive server interactive support (AMBARI-15573)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_webhcat_specific_configs",
+        "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_purge_table",
+        "description": "Hive purge table support (AMBARI-12260)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_server2_kerberized_env",
+        "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+        "min_version": "2.2.3.0",
+        "max_version": "2.2.5.0"
+      },
+      {
+        "name": "hive_env_heapsize",
+        "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_kms_hsm_support",
+        "description": "Ranger KMS HSM support (AMBARI-15752)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_log4j_support",
+        "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_kerberos_support",
+        "description": "Ranger Kerberos support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_metastore_site_support",
+        "description": "Hive Metastore site support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_usersync_password_jceks",
+        "description": "Saving Ranger Usersync credentials in jceks",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_install_infra_client",
+        "description": "Ambari Infra Service support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "falcon_atlas_support_2_3",
+        "description": "Falcon Atlas integration support for 2.3 stack",
+        "min_version": "2.3.99.0",
+        "max_version": "2.4.0.0"
+      },
+      {
+        "name": "falcon_atlas_support",
+        "description": "Falcon Atlas integration",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hbase_home_directory",
+        "description": "Hbase home directory in HDFS needed for HBASE backup",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_livy",
+        "description": "Livy as slave component of spark",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_livy2",
+        "description": "Livy as slave component of spark",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "atlas_ranger_plugin_support",
+        "description": "Atlas Ranger plugin support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "atlas_conf_dir_in_path",
+        "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
+        "min_version": "2.3.0.0",
+        "max_version": "2.4.99.99"
+      },
+      {
+        "name": "atlas_upgrade_support",
+        "description": "Atlas supports express and rolling upgrades",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "atlas_hook_support",
+        "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_pid_support",
+        "description": "Ranger Service support pid generation AMBARI-16756",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_kms_pid_support",
+        "description": "Ranger KMS Service support pid generation",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_admin_password_change",
+        "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_setup_db_on_start",
+        "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "storm_metrics_apache_classes",
+        "description": "Metrics sink for Storm that uses Apache class names",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_java_opts_support",
+        "description": "Allow Spark to generate java-opts file",
+        "min_version": "2.2.0.0",
+        "max_version": "2.4.0.0"
+      },
+      {
+        "name": "atlas_hbase_setup",
+        "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_hive_plugin_jdbc_url",
+        "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "zkfc_version_advertised",
+        "description": "ZKFC advertise version",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "phoenix_core_hdfs_site_required",
+        "description": "HDFS and CORE site required for Phoenix",
+        "max_version": "2.5.9.9"
+      },
+      {
+        "name": "ranger_tagsync_ssl_xml_support",
+        "description": "Ranger Tagsync ssl xml support.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "ranger_xml_configuration",
+        "description": "Ranger code base support xml configurations",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_ranger_plugin_support",
+        "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "yarn_ranger_plugin_support",
+        "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger_solr_config_support",
+        "description": "Showing Ranger solrconfig.xml on UI",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "hive_interactive_atlas_hook_required",
+        "description": "Registering Atlas Hook for Hive Interactive.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "core_site_for_ranger_plugins",
+        "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "atlas_install_hook_package_support",
+        "description": "Stop installing packages from 2.6",
+        "max_version": "2.5.9.9"
+      },
+      {
+        "name": "atlas_hdfs_site_on_namenode_ha",
+        "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "hive_interactive_ga",
+        "description": "Hive Interactive GA support",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "secure_ranger_ssl_password",
+        "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "ranger_kms_ssl",
+        "description": "Ranger KMS SSL properties in ambari stack",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "nifi_encrypt_config",
+        "description": "Encrypt sensitive properties written to nifi property file",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "toolkit_config_update",
+        "description": "Support separate input and output for toolkit configuration",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "admin_toolkit_support",
+        "description": "Supports the nifi admin toolkit",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "tls_toolkit_san",
+        "description": "Support subject alternative name flag",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "nifi_jaas_conf_create",
+        "description": "Create NIFI jaas configuration when kerberos is enabled",
+        "min_version": "2.6.0.0"
+      }
+    ]
+  }
 }
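
The practical effect of the new layout above is that the feature list is now nested under the
stack name, so a consumer must select its stack's entry before testing version bounds. A minimal
sketch of that lookup in Python -- the function and variable names are illustrative, not the
actual Ambari helpers, and it assumes a plain dotted version string without a build suffix:

    import json

    def check_stack_feature(feature_name, current_version, stack_name, stack_features_json):
        # stack_features_json is the raw cluster-env/stack_features value,
        # now keyed by stack name ("HDP" above).
        def version_tuple(v):
            return tuple(int(part) for part in v.split("."))

        stack_entry = json.loads(stack_features_json).get(stack_name, {})
        for feature in stack_entry.get("stack_features", []):
            if feature["name"] != feature_name:
                continue
            min_version = feature.get("min_version")
            max_version = feature.get("max_version")
            if min_version and version_tuple(current_version) < version_tuple(min_version):
                return False
            if max_version and version_tuple(current_version) >= version_tuple(max_version):
                return False
            return True
        return False

    # e.g. check_stack_feature("rolling_upgrade", "2.4.0.0", "HDP", raw_json) -> True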

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
index d1aab4b..c515d57 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
@@ -1,4 +1,14 @@
 {
-  "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
-  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}
\ No newline at end of file
+  "HDP": {
+    "stack_selector": [
+      "hdp-select",
+      "/usr/bin/hdp-select",
+      "hdp-select"
+    ],
+    "conf_selector": [
+      "conf-select",
+      "/usr/bin/conf-select",
+      "conf-select"
+    ]
+  }
+}
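
stack_tools gets the same treatment: the selector definitions now live under the stack name, so
two stacks can carry different selector tools side by side. A short sketch of resolving them,
again with hypothetical helper names rather than the real Ambari code:

    import json

    def get_stack_selector(stack_name, stack_tools_json):
        # stack_tools_json is the raw cluster-env/stack_tools value shown above.
        tools = json.loads(stack_tools_json)[stack_name]
        name, path, package = tools["stack_selector"]
        return name, path, package

    # e.g. get_stack_selector("HDP", raw_json)[1] -> "/usr/bin/hdp-select"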

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
index 7df00ee..f19ac52 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
@@ -20,6 +20,18 @@
  */
 -->
 <configuration>
+  <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_name</name>
+    <value>PERF</value>
+    <description>The name of the stack.</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 
   <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
   <property>
@@ -55,8 +67,8 @@
 
   <property>
     <name>stack_root</name>
-    <value>/usr/perf</value>
-    <description>Stack root folder</description>
+    <value>{"PERF":"/usr/perf"}</value>
+    <description>JSON which defines the stack root by stack name</description>  
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>
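
stack_root follows the same pattern: its value changes from a bare path to a small JSON map keyed
by stack name. A sketch of how a consumer might resolve it; the fallback to the old plain-string
form is only a defensive assumption, not something this patch shows:

    import json

    def get_stack_root(stack_name, stack_root_value):
        # New format: '{"PERF":"/usr/perf"}'; old format was a bare path.
        try:
            roots = json.loads(stack_root_value)
        except ValueError:
            return stack_root_value  # old plain-string value
        return roots[stack_name]

    # e.g. get_stack_root("PERF", '{"PERF":"/usr/perf"}') -> "/usr/perf"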

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
index e9e0ed2..839e8e6 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
@@ -1,19 +1,21 @@
 {
-  "stack_features": [
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "1.0.0.0"
-    },
-    {
-      "name": "secure_zookeeper",
-      "description": "Protect ZNodes with SASL acl in secure clusters",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "1.0.0.0"
-    }
-  ]
-}
+  "PERF": {
+    "stack_features": [
+      {
+        "name": "rolling_upgrade",
+        "description": "Rolling upgrade support",
+        "min_version": "1.0.0.0"
+      },
+      {
+        "name": "secure_zookeeper",
+        "description": "Protect ZNodes with SASL acl in secure clusters",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "config_versioning",
+        "description": "Configurable versions support",
+        "min_version": "1.0.0.0"
+      }
+    ]
+  }
+}
\ No newline at end of file


[15/31] ambari git commit: Revert "AMBARI-21427. Assigning hosts concurrently to same config group may fail with "org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist". (stoader)"

Posted by ab...@apache.org.
Revert "AMBARI-21427. Assigning hosts concurrently to same config group may fail with "org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist". (stoader)"


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/70cf77e4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/70cf77e4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/70cf77e4

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 70cf77e4087840e89fab50a741d36bf8747ba416
Parents: 15dd999
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Mon Jul 10 23:11:38 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Mon Jul 10 23:19:34 2017 +0300

----------------------------------------------------------------------
 .../ambari/server/topology/AmbariContext.java   | 81 +++++---------------
 1 file changed, 19 insertions(+), 62 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/70cf77e4/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index dee0e6c..106d7c8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -30,7 +30,6 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Lock;
 
 import javax.annotation.Nullable;
 import javax.inject.Inject;
@@ -70,11 +69,9 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
-import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.utils.RetryHelper;
 import org.slf4j.Logger;
@@ -82,8 +79,6 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.Striped;
-import com.google.inject.Provider;
 
 
 /**
@@ -104,12 +99,6 @@ public class AmbariContext {
   @Inject
   ConfigFactory configFactory;
 
-  /**
-   * Used for getting configuration property values from stack and services.
-   */
-  @Inject
-  private Provider<ConfigHelper> configHelper;
-
   private static AmbariManagementController controller;
   private static ClusterController clusterController;
   //todo: task id's.  Use existing mechanism for getting next task id sequence
@@ -123,16 +112,6 @@ public class AmbariContext {
 
   private final static Logger LOG = LoggerFactory.getLogger(AmbariContext.class);
 
-
-  /**
-   * When config groups are created using Blueprints these are created when
-   * hosts join a hostgroup and are added to the corresponding config group.
-   * Since hosts join in parallel there might be a race condition in creating
-   * the config group a host is to be added to. Thus we need to synchronize
-   * the creation of config groups with the same name.
-   */
-  private Striped<Lock> configGroupCreateLock = Striped.lazyWeakLock(1);
-
   public boolean isClusterKerberosEnabled(long clusterId) {
     Cluster cluster;
     try {
@@ -188,10 +167,9 @@ public class AmbariContext {
 
   public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType, String repoVersion) {
     Stack stack = topology.getBlueprint().getStack();
-    StackId stackId = new StackId(stack.getName(), stack.getVersion());
 
     createAmbariClusterResource(clusterName, stack.getName(), stack.getVersion(), securityType, repoVersion);
-    createAmbariServiceAndComponentResources(topology, clusterName, stackId, repoVersion);
+    createAmbariServiceAndComponentResources(topology, clusterName);
   }
 
   public void createAmbariClusterResource(String clusterName, String stackName, String stackVersion, SecurityType securityType, String repoVersion) {
@@ -218,8 +196,7 @@ public class AmbariContext {
     }
   }
 
-  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName,
-      StackId stackId, String repositoryVersion) {
+  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName) {
     Collection<String> services = topology.getBlueprint().getServices();
 
     try {
@@ -228,13 +205,11 @@ public class AmbariContext {
     } catch (AmbariException e) {
       throw new RuntimeException("Failed to persist service and component resources: " + e, e);
     }
-    Set<ServiceRequest> serviceRequests = new HashSet<>();
-    Set<ServiceComponentRequest> componentRequests = new HashSet<>();
+    Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
+    Set<ServiceComponentRequest> componentRequests = new HashSet<ServiceComponentRequest>();
     for (String service : services) {
       String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service);
-      serviceRequests.add(new ServiceRequest(clusterName, service, stackId.getStackId(),
-          repositoryVersion, null, credentialStoreEnabled));
-
+      serviceRequests.add(new ServiceRequest(clusterName, service, null, credentialStoreEnabled));
       for (String component : topology.getBlueprint().getComponents(service)) {
         String recoveryEnabled = topology.getBlueprint().getRecoveryEnabled(service, component);
         componentRequests.add(new ServiceComponentRequest(clusterName, service, component, null, recoveryEnabled));
@@ -248,14 +223,14 @@ public class AmbariContext {
     }
     // set all services state to INSTALLED->STARTED
     // this is required so the user can start failed services at the service level
-    Map<String, Object> installProps = new HashMap<>();
+    Map<String, Object> installProps = new HashMap<String, Object>();
     installProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "INSTALLED");
     installProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    Map<String, Object> startProps = new HashMap<>();
+    Map<String, Object> startProps = new HashMap<String, Object>();
     startProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "STARTED");
     startProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    Predicate predicate = new EqualsPredicate<>(
-      ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
+    Predicate predicate = new EqualsPredicate<String>(
+        ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
     try {
       getServiceResourceProvider().updateResources(
           new RequestImpl(null, Collections.singleton(installProps), null, null), predicate);
@@ -287,9 +262,9 @@ public class AmbariContext {
     }
     String clusterName = cluster.getClusterName();
 
-    Map<String, Object> properties = new HashMap<>();
+    Map<String, Object> properties = new HashMap<String, Object>();
     properties.put(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    properties.put(HostResourceProvider.HOST_HOST_NAME_PROPERTY_ID, hostName);
+    properties.put(HostResourceProvider.HOST_NAME_PROPERTY_ID, hostName);
     properties.put(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID, host.getRackInfo());
 
     try {
@@ -300,7 +275,7 @@ public class AmbariContext {
           hostName, e.toString()), e);
     }
 
-    final Set<ServiceComponentHostRequest> requests = new HashSet<>();
+    final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
 
     for (Map.Entry<String, Collection<String>> entry : components.entrySet()) {
       String service = entry.getKey();
@@ -353,17 +328,11 @@ public class AmbariContext {
   }
 
   public void registerHostWithConfigGroup(final String hostName, final ClusterTopology topology, final String groupName) {
-    String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
-
-    Lock configGroupLock = configGroupCreateLock.get(qualifiedGroupName);
-
     try {
-      configGroupLock.lock();
-
       boolean hostAdded = RetryHelper.executeWithRetry(new Callable<Boolean>() {
         @Override
         public Boolean call() throws Exception {
-          return addHostToExistingConfigGroups(hostName, topology, qualifiedGroupName);
+          return addHostToExistingConfigGroups(hostName, topology, groupName);
         }
       });
       if (!hostAdded) {
@@ -373,9 +342,6 @@ public class AmbariContext {
       LOG.error("Unable to register config group for host: ", e);
       throw new RuntimeException("Unable to register config group for host: " + hostName);
     }
-    finally {
-      configGroupLock.unlock();
-    }
   }
 
   public RequestStatusResponse installHost(String hostName, String clusterName, Collection<String> skipInstallForComponents, Collection<String> dontSkipInstallForComponents, boolean skipFailure) {
@@ -583,7 +549,7 @@ public class AmbariContext {
   /**
    * Add the new host to an existing config group.
    */
-  private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String configGroupName) {
+  private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String groupName) {
     boolean addedHost = false;
     Clusters clusters;
     Cluster cluster;
@@ -597,8 +563,9 @@ public class AmbariContext {
     // I don't know of a method to get config group by name
     //todo: add a method to get config group by name
     Map<Long, ConfigGroup> configGroups = cluster.getConfigGroups();
+    String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
     for (ConfigGroup group : configGroups.values()) {
-      if (group.getName().equals(configGroupName)) {
+      if (group.getName().equals(qualifiedGroupName)) {
         try {
           Host host = clusters.getHost(hostName);
           addedHost = true;
@@ -622,7 +589,7 @@ public class AmbariContext {
    * and the hosts associated with the host group are assigned to the config group.
    */
   private void createConfigGroupsAndRegisterHost(ClusterTopology topology, String groupName) throws AmbariException {
-    Map<String, Map<String, Config>> groupConfigs = new HashMap<>();
+    Map<String, Map<String, Config>> groupConfigs = new HashMap<String, Map<String, Config>>();
     Stack stack = topology.getBlueprint().getStack();
 
     // get the host-group config with cluster creation template overrides
@@ -641,7 +608,7 @@ public class AmbariContext {
       //todo: attributes
       Map<String, Config> serviceConfigs = groupConfigs.get(service);
       if (serviceConfigs == null) {
-        serviceConfigs = new HashMap<>();
+        serviceConfigs = new HashMap<String, Config>();
         groupConfigs.put(service, serviceConfigs);
       }
       serviceConfigs.put(type, config);
@@ -702,16 +669,6 @@ public class AmbariContext {
     return String.format("%s:%s", bpName, hostGroupName);
   }
 
-  /**
-   * Gets an instance of {@link ConfigHelper} for classes which are not
-   * dependency injected.
-   *
-   * @return a {@link ConfigHelper} instance.
-   */
-  public ConfigHelper getConfigHelper() {
-    return configHelper.get();
-  }
-
   private synchronized HostResourceProvider getHostResourceProvider() {
     if (hostResourceProvider == null) {
       hostResourceProvider = (HostResourceProvider)
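
For context on what this revert removes: the AMBARI-21427 change had serialized config-group
creation on the qualified group name (Guava's Striped locks, seen in the deletions above) so that
hosts joining the same host group in parallel would not both try to create the group. The idea,
reduced to a minimal Python sketch rather than the actual Java:

    import threading
    from collections import defaultdict

    _locks_guard = threading.Lock()
    _group_locks = defaultdict(threading.Lock)   # one lock per config-group name

    def register_host_with_config_group(group_name, add_host_to_existing_group, create_group):
        # add_host_to_existing_group/create_group stand in for the real provisioning
        # calls; only the locking pattern mirrors the reverted change.
        with _locks_guard:
            lock = _group_locks[group_name]      # same name -> same lock object
        with lock:
            if not add_host_to_existing_group(): # group not there yet
                create_group()                   # create it while still holding the lock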


[03/31] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
index abe84ab..e5abe32 100644
--- a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
+++ b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
@@ -1,143 +1,143 @@
 {
     "localComponents": [
-        "NAMENODE", 
-        "SECONDARY_NAMENODE", 
-        "ZOOKEEPER_SERVER", 
-        "DATANODE", 
-        "HDFS_CLIENT", 
-        "ZOOKEEPER_CLIENT", 
-        "RANGER_USERSYNC", 
-        "RANGER_ADMIN", 
+        "NAMENODE",
+        "SECONDARY_NAMENODE",
+        "ZOOKEEPER_SERVER",
+        "DATANODE",
+        "HDFS_CLIENT",
+        "ZOOKEEPER_CLIENT",
+        "RANGER_USERSYNC",
+        "RANGER_ADMIN",
         "RANGER_TAGSYNC",
         "LOGSEARCH_SOLR",
         "LOGSEARCH_SOLR_CLIENT"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
+        },
         "ranger-tagsync-site": {},
         "ranger-tagsync-policymgr-ssl": {},
         "zoo.cfg": {},
         "hadoop-policy": {},
-        "hdfs-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
+        "hdfs-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
         "ranger-solr-configuration": {},
         "usersync-properties": {},
         "zookeeper-env": {},
         "infra-solr-env": {},
         "infra-solr-client-log4j": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "11-0", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER", 
-    "role": "RANGER_ADMIN", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 11, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "11-0",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER",
+    "role": "RANGER_ADMIN",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 11,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "c1", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 31, 
-    "roleParams": {}, 
+    },
+    "clusterName": "c1",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 31,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "core-site": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
         },
@@ -146,7 +146,7 @@
         },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
         },
@@ -165,116 +165,116 @@
         "cluster-env": {
             "tag": "version1"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
         "package_version": "2_6_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
         "current_version": "2.6.0.0-801",
         "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
         "agent_stack_retry_count": "5",
         "stack_version": "2.6",
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
-        "java_home": "/usr/jdk64/jdk1.7.0_45", 
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
         "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.6\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.6.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
         "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
-        "db_name": "ambari", 
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "jce_policy-8.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "db_name": "ambari",
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "jce_policy-8.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_admin.py", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_admin.py",
         "hooks_folder": "HDP/2.0.6/hooks",
         "version": "2.6.0.0-801",
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 0, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 0,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "infra_solr_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
-            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
             "xasecure.audit.destination.hdfs": "true",
-            "xasecure.audit.destination.solr": "false", 
+            "xasecure.audit.destination.solr": "false",
             "xasecure.audit.provider.summary.enabled": "false",
             "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
         },
         "ranger-tagsync-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
+            "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
             "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
-            "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks", 
+            "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks",
             "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
         },
@@ -287,143 +287,143 @@
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
         },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
             "ranger.admin.kerberos.cookie.domain": "",
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks", 
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
             "ranger.truststore.password": "changeit",
             "ranger.truststore.alias": "trustStoreAlias",
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
             "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/infra-solr",
-            "ranger.lookup.kerberos.principal": "", 
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
-            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.lookup.kerberos.principal": "",
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151",
             "ranger.service.https.attrib.keystore.credential.alias": "keyStoreCredentialAlias"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
             "atlas.kafka.bootstrap.servers": "localhost:6667"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
             "ranger_solr_replication_factor": "1",
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
         },
         "ranger-solr-configuration": {
@@ -432,248 +432,248 @@
             "ranger_audit_logs_merge_factor": "5"
         },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:50010", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.datanode.http.address": "0.0.0.0:50075", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:50010",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.heartbeat.interval": "3",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.datanode.http.address": "0.0.0.0:50075",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
             "ranger.tagsync.dest.ranger.ssl.config.filename": "{{stack_root}}/current/ranger-tagsync/conf/ranger-policymgr-ssl.xml",
             "ranger.tagsync.source.atlasrest.username": "",
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "", 
-            "ranger.tagsync.kerberos.keytab": "", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "",
+            "ranger.tagsync.kerberos.keytab": "",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "core-site": {
-            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "fs.trash.interval": "360", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "hadoop.security.authentication": "simple", 
-            "hadoop.proxyuser.root.groups": "*", 
-            "ipc.client.connection.maxidletime": "30000", 
-            "hadoop.security.key.provider.path": "", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.security.authorization": "false", 
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "ipc.server.tcpnodelay": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "DEFAULT", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "ipc.client.idlethreshold": "8000", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "fs.trash.interval": "360",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "hadoop.security.authentication": "simple",
+            "hadoop.proxyuser.root.groups": "*",
+            "ipc.client.connection.maxidletime": "30000",
+            "hadoop.security.key.provider.path": "",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.security.authorization": "false",
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "ipc.server.tcpnodelay": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "DEFAULT",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "ipc.client.idlethreshold": "8000",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
             "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
-        }, 
+        },
         "ssl-server": {
-            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", 
-            "ssl.server.keystore.keypassword": "bigdata", 
-            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", 
-            "ssl.server.keystore.password": "bigdata", 
-            "ssl.server.truststore.password": "bigdata", 
-            "ssl.server.truststore.type": "jks", 
-            "ssl.server.keystore.type": "jks", 
+            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
+            "ssl.server.keystore.keypassword": "bigdata",
+            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
+            "ssl.server.keystore.password": "bigdata",
+            "ssl.server.truststore.password": "bigdata",
+            "ssl.server.truststore.type": "jks",
+            "ssl.server.keystore.type": "jks",
             "ssl.server.truststore.reload.interval": "10000"
-        }, 
-        "ranger-site": {}, 
+        },
+        "ranger-site": {},
         "admin-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
 ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] 
 %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
-        }, 
+        },
         "tagsync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
-        }, 
+        },
         "ranger-hdfs-security": {
-            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
-            "ranger.plugin.hdfs.service.name": "{{repo_name}}", 
-            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
-            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", 
-            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", 
-            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", 
+            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+            "ranger.plugin.hdfs.service.name": "{{repo_name}}",
+            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
+            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
+            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
+            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
             "xasecure.add-hadoop-authorization": "true"
-        }, 
-        "usersync-properties": {}, 
+        },
+        "usersync-properties": {},
         "zookeeper-env": {
-            "zk_log_dir": "/var/log/zookeeper", 
-            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
-            "zk_server_heapsize": "1024m", 
-            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper",
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+            "zk_server_heapsize": "1024m",
+            "zk_pid_dir": "/var/run/zookeeper",
             "zk_user": "zookeeper"
         },
         "infra-solr-env": {
@@ -682,7 +682,7 @@
             "infra_solr_kerberos_name_rules": "DEFAULT",
             "infra_solr_user": "infra-solr",
             "infra_solr_maxmem": "1024",
-            "content": "#!/bin/bash\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# By default the script will use JAVA_HOME to determine which java\n# to use, but you can set a specific path for Solr to use without\n# affecting other Java applica
 tions on your server/workstation.\nSOLR_JAVA_HOME

<TRUNCATED>

[02/31] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
index fa791c1..64e7d52 100644
--- a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
+++ b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
@@ -1,101 +1,101 @@
 {
     "localComponents": [
-        "SECONDARY_NAMENODE", 
-        "HDFS_CLIENT", 
-        "DATANODE", 
-        "NAMENODE", 
-        "RANGER_ADMIN", 
-        "RANGER_TAGSYNC", 
-        "RANGER_USERSYNC", 
-        "ZOOKEEPER_SERVER", 
-        "ZOOKEEPER_CLIENT", 
+        "SECONDARY_NAMENODE",
+        "HDFS_CLIENT",
+        "DATANODE",
+        "NAMENODE",
+        "RANGER_ADMIN",
+        "RANGER_TAGSYNC",
+        "RANGER_USERSYNC",
+        "ZOOKEEPER_SERVER",
+        "ZOOKEEPER_CLIENT",
         "KERBEROS_CLIENT",
         "LOGSEARCH_SOLR",
         "LOGSEARCH_SOLR_CLIENT"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
-        "kerberos-env": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
+        "kerberos-env": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
+        },
         "ranger-tagsync-site": {},
-        "ranger-tagsync-policymgr-ssl": {}, 
+        "ranger-tagsync-policymgr-ssl": {},
         "zoo.cfg": {},
         "hadoop-policy": {},
-        "hdfs-log4j": {}, 
-        "krb5-conf": {}, 
+        "hdfs-log4j": {},
+        "krb5-conf": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
         "ranger-solr-configuration": {},
         "usersync-properties": {},
         "zookeeper-env": {},
         "infra-solr-env": {},
         "infra-solr-client-log4j": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "41-2", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER", 
-    "role": "RANGER_ADMIN", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 41, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "41-2",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER",
+    "role": "RANGER_ADMIN",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 41,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "test_Cluster01", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 186, 
-    "roleParams": {}, 
+    },
+    "clusterName": "test_Cluster01",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 186,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1467016680635"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1467016680511"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1467016680537"
         },
@@ -104,52 +104,52 @@
         },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "kerberos-env": {
             "tag": "version1467016537243"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1467016680401"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1467016680586"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "krb5-conf": {
             "tag": "version1467016537243"
-        }, 
+        },
         "core-site": {
             "tag": "version1467016680612"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1467016680446"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
         },
@@ -158,10 +158,10 @@
         },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1467016680492"
         },
@@ -174,116 +174,116 @@
         "cluster-env": {
             "tag": "version1467016680567"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
         "package_version": "2_6_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
         "current_version": "2.6.0.0-801",
         "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
         "agent_stack_retry_count": "5",
         "stack_version": "2.6",
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
         "java_home": "/usr/jdk64/jdk1.7.0_45",
         "repository_version_id": "1",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
         "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.6\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.6.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
         "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
         "db_name": "ambari",
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "jce_policy-8.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "jce_policy-8.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_admin.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_admin.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
         "version": "2.6.0.0-801",
         "max_duration_for_retries": "0",
         "command_retry_enabled": "false",
-        "command_timeout": "600", 
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 2, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 2,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "infra_solr_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
             "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
-            "xasecure.audit.destination.hdfs": "true", 
+            "xasecure.audit.destination.hdfs": "true",
             "xasecure.audit.destination.solr": "false",
-            "xasecure.audit.provider.summary.enabled": "false", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
         },
         "ranger-tagsync-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
+            "xasecure.policymgr.clientssl.keystore": "/etc/security/serverKeys/ranger-tagsync-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
             "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
-            "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks", 
+            "xasecure.policymgr.clientssl.truststore": "/etc/security/serverKeys/ranger-tagsync-mytruststore.jks",
             "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{ranger_tagsync_credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
         },
@@ -296,186 +296,186 @@
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
         },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
             "ranger.is.solr.kerberised": "true",
-            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}", 
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
             "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
             "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/infra-solr",
             "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
             "ranger.jpa.jdbc.credential.alias": "rangeradmin",
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
-            "atlas.kafka.security.protocol": "SASL_PLAINTEXT", 
-            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}", 
-            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}", 
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule", 
-            "atlas.jaas.KafkaClient.option.serviceName": "kafka", 
-            "atlas.kafka.bootstrap.servers": "localhost:6667", 
-            "atlas.jaas.KafkaClient.option.useKeyTab": "true", 
-            "atlas.jaas.KafkaClient.option.storeKey": "true", 
-            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required", 
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+            "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
+            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
+            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "atlas.jaas.KafkaClient.option.serviceName": "kafka",
+            "atlas.kafka.bootstrap.servers": "localhost:6667",
+            "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+            "atlas.jaas.KafkaClient.option.storeKey": "true",
+            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
             "atlas.kafka.sasl.kerberos.service.name": "kafka"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
             "ranger_solr_replication_factor": "1",
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "kerberos-env": {
-            "kdc_hosts": "c6401.ambari.apache.org", 
-            "manage_auth_to_local": "true", 
-            "install_packages": "true", 
-            "realm": "EXAMPLE.COM", 
-            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
-            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}", 
-            "kdc_create_attributes": "", 
-            "admin_server_host": "c6401.ambari.apache.org", 
-            "group": "ambari-managed-principals", 
-            "password_length": "20", 
-            "ldap_url": "", 
-            "manage_identities": "true", 
-            "password_min_lowercase_letters": "1", 
-            "create_ambari_principal": "true", 
-            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}", 
-            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", 
-            "password_chat_timeout": "5", 
-            "kdc_type": "mit-kdc", 
-            "set_password_expiry": "false", 
-            "password_min_punctuation": "1", 
-            "container_dn": "", 
-            "case_insensitive_username_rules": "false", 
-            "password_min_whitespace": "0", 
-            "password_min_uppercase_letters": "1", 
+            "kdc_hosts": "c6401.ambari.apache.org",
+            "manage_auth_to_local": "true",
+            "install_packages": "true",
+            "realm": "EXAMPLE.COM",
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}",
+            "kdc_create_attributes": "",
+            "admin_server_host": "c6401.ambari.apache.org",
+            "group": "ambari-managed-principals",
+            "password_length": "20",
+            "ldap_url": "",
+            "manage_identities": "true",
+            "password_min_lowercase_letters": "1",
+            "create_ambari_principal": "true",
+            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+            "password_chat_timeout": "5",
+            "kdc_type": "mit-kdc",
+            "set_password_expiry": "false",
+            "password_min_punctuation": "1",
+            "container_dn": "",
+            "case_insensitive_username_rules": "false",
+            "password_min_whitespace": "0",
+            "password_min_uppercase_letters": "1",
             "password_min_digits": "1"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
         },
         "ranger-solr-configuration": {
@@ -484,261 +484,261 @@
             "ranger_audit_logs_merge_factor": "5"
         },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:1019", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.datanode.http.address": "0.0.0.0:1022", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+            "dfs.heartbeat.interval": "3",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:1019",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.datanode.http.address": "0.0.0.0:1022",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
             "ranger.tagsync.dest.ranger.ssl.config.filename": "{{stack_root}}/current/ranger-tagsync/conf/ranger-policymgr-ssl.xml",
             "ranger.tagsync.source.atlasrest.username": "",
             "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM", 
-            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
+            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "krb5-conf": {
-            "domains": "", 
-            "manage_krb5_conf": "true", 
-            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}", 
+            "domains": "",
+            "manage_krb5_conf": "true",
+            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}",
             "conf_dir": "/etc"
-        }, 
+        },
         "core-site": {
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "fs.trash.interval": "360", 
-            "ipc.server.tcpnodelay": "true", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
-            "ipc.client.idlethreshold": "8000", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "hadoop.security.authentication": "kerberos", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.HTTP.groups": "users", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "hadoop.security.authorization": "true", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org", 
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "fs.trash.interval": "360",
+            "ipc.server.tcpnodelay": "true",
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "ipc.client.idlethreshold": "8000",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "hadoop.security.authentication": "kerberos",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.HTTP.groups": "users",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "hadoop.security.authorization": "true",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
             "ipc.client.connection.maxidletime": "30000"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: consol

<TRUNCATED>

[30/31] ambari git commit: AMBARI-21476 Log Search UI: implement pagination for logs list. (ababiichuk)

Posted by ab...@apache.org.
AMBARI-21476 Log Search UI: implement pagination for logs list. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c9338e61
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c9338e61
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c9338e61

Branch: refs/heads/branch-feature-logsearch-ui
Commit: c9338e6186dcab80536e8edc48974329b9831100
Parents: 2d5b756
Author: ababiichuk <ab...@hortonworks.com>
Authored: Fri Jul 14 15:21:38 2017 +0300
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Fri Jul 14 15:21:38 2017 +0300

----------------------------------------------------------------------
 .../src/app/app.module.ts                       |   8 +-
 .../queries/audit-logs-query-params.class.ts    |  18 +-
 .../src/app/components/app.component.html       |  13 +-
 .../src/app/components/app.component.less       |   4 +
 .../dropdown-button.component.html              |  12 +-
 .../dropdown-button.component.ts                |  18 +-
 .../filter-button.component.spec.ts             |   5 -
 .../filter-button/filter-button.component.ts    |  32 +-
 .../filter-dropdown.component.spec.ts           |   5 -
 .../filter-dropdown.component.ts                |  55 +-
 .../filter-text-field.component.html            |   4 +-
 .../filter-text-field.component.spec.ts         |   6 +-
 .../filter-text-field.component.ts              |  33 +-
 .../filters-panel/filters-panel.component.html  |  25 +-
 .../filters-panel/filters-panel.component.less  |  40 +-
 .../filters-panel/filters-panel.component.ts    |   3 +-
 .../logs-list/logs-list.component.html          |   6 +-
 .../logs-list/logs-list.component.less          |   3 +-
 .../logs-list/logs-list.component.spec.ts       |  14 +-
 .../components/logs-list/logs-list.component.ts |  27 +-
 .../main-container.component.html               |   1 +
 .../main-container.component.less               |  24 +
 .../main-container/main-container.component.ts  |   8 +-
 .../menu-button/menu-button.component.html      |   4 +-
 .../menu-button/menu-button.component.ts        |   7 +-
 .../pagination-controls.component.html          |  23 +
 .../pagination-controls.component.less          |  22 +
 .../pagination-controls.component.spec.ts       |  43 +
 .../pagination-controls.component.ts            |  73 ++
 .../pagination/pagination.component.html        |  24 +
 .../pagination/pagination.component.less        |  28 +
 .../pagination/pagination.component.spec.ts     |  69 ++
 .../pagination/pagination.component.ts          |  72 ++
 .../src/app/components/variables.less           |  10 +
 .../src/app/mock-data.ts                        | 906 +++++++++++++++++++
 .../src/app/services/filtering.service.ts       |  50 +-
 .../src/app/services/mock-api-data.service.ts   |  15 +-
 .../src/assets/i18n/en.json                     |   3 +
 .../src/assets/mock-data.ts                     | 906 -------------------
 .../ambari-logsearch-web-new/src/main.ts        |   4 +-
 .../ambari-logsearch-web-new/tsconfig.json      |   3 +
 41 files changed, 1515 insertions(+), 1111 deletions(-)
----------------------------------------------------------------------
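
The diffstat above adds two new components, PaginationComponent and PaginationControlsComponent, whose sources are not reproduced in this message. As a rough orientation only, the sketch below shows the kind of page-navigation bookkeeping such controls typically need (clamping the current page against a page count derived from the total number of results). The names PaginationState, totalCount, pageSize and currentPage are illustrative assumptions, not the actual component API from this commit.

// Minimal sketch (assumptions, not the committed component code):
// clamp-based page navigation helpers for a pagination control.
export class PaginationState {
  constructor(public currentPage = 0, public pageSize = 10, public totalCount = 0) {}

  // Number of pages for the current totalCount/pageSize (at least 1).
  get pagesCount(): number {
    return Math.max(1, Math.ceil(this.totalCount / this.pageSize));
  }

  // Move to an arbitrary page, clamped to the valid range [0, pagesCount - 1].
  setPage(page: number): void {
    this.currentPage = Math.min(Math.max(0, page), this.pagesCount - 1);
  }

  // Convenience helpers for first/previous/next/last buttons.
  first(): void { this.setPage(0); }
  previous(): void { this.setPage(this.currentPage - 1); }
  next(): void { this.setPage(this.currentPage + 1); }
  last(): void { this.setPage(this.pagesCount - 1); }
}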


http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/app.module.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/app.module.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/app.module.ts
index 580fffa..503aa46 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/app.module.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/app.module.ts
@@ -27,7 +27,7 @@ import {TranslateHttpLoader} from '@ngx-translate/http-loader';
 import {StoreModule} from '@ngrx/store';
 import {MomentModule} from 'angular2-moment';
 import {MomentTimezoneModule} from 'angular-moment-timezone';
-import {environment} from '../environments/environment';
+import {environment} from '@envs/environment';
 import {mockApiDataService} from '@app/services/mock-api-data.service'
 import {HttpClientService} from '@app/services/http-client.service';
 import {ComponentActionsService} from '@app/services/component-actions.service';
@@ -60,6 +60,8 @@ import {FilterButtonComponent} from '@app/components/filter-button/filter-button
 import {AccordionPanelComponent} from '@app/components/accordion-panel/accordion-panel.component';
 import {LogsListComponent} from '@app/components/logs-list/logs-list.component';
 import {DropdownButtonComponent} from '@app/components/dropdown-button/dropdown-button.component';
+import {PaginationComponent} from '@app/components/pagination/pagination.component';
+import {PaginationControlsComponent} from '@app/components/pagination-controls/pagination-controls.component';
 
 export function HttpLoaderFactory(http: Http) {
   // adding 'static' parameter to step over mock data request
@@ -95,7 +97,9 @@ export function getXHRBackend(injector: Injector, browser: BrowserXhr, xsrf: XSR
     FilterButtonComponent,
     AccordionPanelComponent,
     LogsListComponent,
-    DropdownButtonComponent
+    DropdownButtonComponent,
+    PaginationComponent,
+    PaginationControlsComponent
   ],
   imports: [
     BrowserModule,

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/classes/queries/audit-logs-query-params.class.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/classes/queries/audit-logs-query-params.class.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/classes/queries/audit-logs-query-params.class.ts
index 749ed21..75ad097 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/classes/queries/audit-logs-query-params.class.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/classes/queries/audit-logs-query-params.class.ts
@@ -18,10 +18,22 @@
 
 import {QueryParams} from '@app/classes/queries/query-params.class';
 
+export const defaultParams = {
+  page: '0',
+  pageSize: '10'
+};
+
 export class AuditLogsQueryParams extends QueryParams {
-  startIndex?: string = '0';
-  page?: string = '0';
-  pageSize?: string = '25';
+  constructor(options: AuditLogsQueryParams) {
+    let finalParams = Object.assign({}, defaultParams, options);
+    const page = parseInt(finalParams.page),
+      pageSize = parseInt(finalParams.pageSize);
+    finalParams.startIndex = isNaN(page) || isNaN(pageSize) ? '' : (page * pageSize).toString();
+    super(finalParams);
+  }
+  page: string;
+  pageSize: string;
+  startIndex: string;
   sortBy?: string;
   sortType?: string;
   start_time?: string;
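
The constructor added above derives startIndex from page and pageSize instead of hard-coding it. Pulled out of the Angular class purely for clarity, the same derivation reads as follows; the function name computeStartIndex is illustrative, not part of the commit.

// Standalone restatement of the startIndex derivation shown in the diff above.
function computeStartIndex(page: string, pageSize: string): string {
  const pageNumber = parseInt(page, 10);
  const size = parseInt(pageSize, 10);
  // Mirror the NaN guard from AuditLogsQueryParams: fall back to an empty
  // string when either value is not numeric, otherwise offset = page * pageSize.
  return isNaN(pageNumber) || isNaN(size) ? '' : (pageNumber * size).toString();
}

// e.g. computeStartIndex('2', '10') === '20'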

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/app.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/app.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/app.component.html
index 5add5d5..a353f4d 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/app.component.html
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/app.component.html
@@ -24,9 +24,12 @@
   </nav>
 </header>
 
-<!-- hidden element for moving the main bar outside the fixed header -->
-<div class="navbar invisible">
-  <h1>&nbsp;</h1>
-</div>
 
-<main-container></main-container>
+<main-container>
+  <ng-template>
+    <!-- hidden element for moving the main bar outside the fixed header -->
+    <div class="navbar invisible">
+      <h1>&nbsp;</h1>
+    </div>
+  </ng-template>
+</main-container>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/app.component.less
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/app.component.less b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/app.component.less
index f948b12..d1aa7ff 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/app.component.less
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/app.component.less
@@ -18,6 +18,10 @@
 @import 'variables';
 
 :host {
+  .full-size;
+  display: flex;
+  flex-direction: column;
+  background-color: @main-background-color; // TODO implement actual color
   line-height: @default-line-height;
 
   .navbar {

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-button/dropdown-button.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-button/dropdown-button.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-button/dropdown-button.component.html
index 8eb92f0..bd0d528 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-button/dropdown-button.component.html
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-button/dropdown-button.component.html
@@ -16,8 +16,10 @@
 -->
 
 <div class="filter-label" *ngIf="label">{{label | translate}}</div>
-<button class="btn btn-link dropdown-toggle" data-toggle="dropdown">
-  {{selectedLabel | translate}} <span class="caret"></span>
-</button>
-<ul data-component="dropdown-list" [ngClass]="{'dropdown-menu': true, 'dropdown-menu-right': isRightAlign}" [items]="options"
-    (selectedItemChange)="writeValue($event)"></ul>
+<div [ngClass]="{'dropup': isDropup}">
+  <button class="btn btn-link dropdown-toggle" data-toggle="dropdown">
+    {{selectedLabel | translate}} <span class="caret"></span>
+  </button>
+  <ul data-component="dropdown-list" [ngClass]="{'dropdown-menu': true, 'dropdown-menu-right': isRightAlign}"
+      [items]="options" (selectedItemChange)="updateValue($event)"></ul>
+</div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-button/dropdown-button.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-button/dropdown-button.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-button/dropdown-button.component.ts
index 821f137..3aecd9e 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-button/dropdown-button.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-button/dropdown-button.component.ts
@@ -17,7 +17,6 @@
  */
 
 import {Component, OnInit, Input} from '@angular/core';
-import {FilteringService} from '@app/services/filtering.service';
 import {ComponentActionsService} from '@app/services/component-actions.service';
 import {UtilsService} from '@app/services/utils.service';
 
@@ -28,7 +27,7 @@ import {UtilsService} from '@app/services/utils.service';
 })
 export class DropdownButtonComponent implements OnInit {
 
-  constructor(protected filtering: FilteringService, protected actions: ComponentActionsService, protected utils: UtilsService) {
+  constructor(protected actions: ComponentActionsService, protected utils: UtilsService) {
   }
 
   ngOnInit() {
@@ -53,18 +52,25 @@ export class DropdownButtonComponent implements OnInit {
   @Input()
   isRightAlign?: boolean = false;
 
-  private selectedValue?: any;
+  @Input()
+  isDropup?: boolean = false;
+
+  protected selectedValue?: any;
 
   selectedLabel: string;
 
   get value(): any {
-    return this.selectedValue == null ? this.defaultValue : this.selectedValue;
+    return this.selectedValue;
+  }
+
+  set value(value: any) {
+    this.selectedValue = value;
   }
 
-  writeValue(options: any) {
+  updateValue(options: any) {
     const value = options && options.value;
     if (this.utils.valueHasChanged(this.value, value)) {
-      this.selectedValue = value;
+      this.value = value;
       this.selectedLabel = options.label;
       if (this.action) {
         this.actions[this.action](value);

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.spec.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.spec.ts
index 370b46e..c410ad8 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.spec.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.spec.ts
@@ -19,7 +19,6 @@
 import {NO_ERRORS_SCHEMA} from '@angular/core';
 import {async, ComponentFixture, TestBed} from '@angular/core/testing';
 import {Http} from '@angular/http';
-import {FormControl, FormGroup} from '@angular/forms';
 import {TranslateModule, TranslateLoader} from '@ngx-translate/core';
 import {TranslateHttpLoader} from '@ngx-translate/http-loader';
 import {StoreModule} from '@ngrx/store';
@@ -70,10 +69,6 @@ describe('FilterButtonComponent', () => {
   beforeEach(() => {
     fixture = TestBed.createComponent(FilterButtonComponent);
     component = fixture.componentInstance;
-    component.filterName = 'f';
-    component.form = new FormGroup({
-      f: new FormControl()
-    });
     fixture.detectChanges();
   });
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.ts
index 27456e6..dc6bf82 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.ts
@@ -17,9 +17,8 @@
  */
 
 import {Component, Input, forwardRef} from '@angular/core';
-import {ControlValueAccessor, NG_VALUE_ACCESSOR, FormGroup} from '@angular/forms';
+import {ControlValueAccessor, NG_VALUE_ACCESSOR} from '@angular/forms';
 import {ComponentActionsService} from '@app/services/component-actions.service';
-import {FilteringService} from '@app/services/filtering.service';
 import {UtilsService} from '@app/services/utils.service';
 import {MenuButtonComponent} from '@app/components/menu-button/menu-button.component';
 
@@ -37,41 +36,36 @@ import {MenuButtonComponent} from '@app/components/menu-button/menu-button.compo
 })
 export class FilterButtonComponent extends MenuButtonComponent implements ControlValueAccessor {
 
-  constructor(protected actions: ComponentActionsService, private filtering: FilteringService, private utils: UtilsService) {
+  constructor(protected actions: ComponentActionsService, private utils: UtilsService) {
     super(actions);
   }
 
   @Input()
-  filterName: string;
+  defaultValue?: string;
 
-  @Input()
-  form: FormGroup;
+  private selectedValue: any;
 
   private onChange: (fn: any) => void;
 
-  get filterInstance(): any {
-    return this.filtering.filters[this.filterName];
-  }
-
   get value(): any {
-    return this.filterInstance.selectedValue;
+    return this.selectedValue;
   }
 
   set value(newValue: any) {
-    if (this.utils.valueHasChanged(this.filterInstance.selectedValue, newValue)) {
-      this.filterInstance.selectedValue = newValue;
-      this.onChange(newValue);
-    }
+    this.selectedValue = newValue;
+    this.onChange(newValue);
   }
 
-  writeValue(options: any) {
+  updateValue(options: any) {
     const value = options && options.value;
-    if (this.utils.valueHasChanged(this.filterInstance.selectedValue, value)) {
-      this.filterInstance.selectedValue = value;
-      this.filterInstance.selectedLabel = options.label;
+    if (this.utils.valueHasChanged(this.selectedValue, value)) {
+      this.value = value;
     }
   }
 
+  writeValue() {
+  }
+
   registerOnChange(callback: any): void {
     this.onChange = callback;
   }
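
With FilteringService removed, the filter button now holds its own selectedValue and forwards user-driven changes to the enclosing reactive form through the callback Angular registers via registerOnChange. A minimal sketch of that ControlValueAccessor wiring, stripped of the Angular decorator and with registerOnTouched added because the interface requires it (that hunk is not shown above), could look like the following.

// Minimal sketch of the ControlValueAccessor wiring used by the filter components.
import {ControlValueAccessor} from '@angular/forms';

export class FilterValueAccessorSketch implements ControlValueAccessor {
  private selectedValue: any;
  private onChange: (value: any) => void = () => {};

  // Called by Angular when the form model writes into the view; the components
  // above leave this empty because the form never pushes a value down.
  writeValue(_value: any): void {}

  registerOnChange(callback: (value: any) => void): void {
    this.onChange = callback;
  }

  registerOnTouched(_callback: () => void): void {}

  // Called from the template when the user picks an option.
  updateValue(option: {label: string, value: any}): void {
    if (option && option.value !== this.selectedValue) {
      this.selectedValue = option.value;
      this.onChange(option.value);   // propagate to the enclosing FormGroup
    }
  }
}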

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.spec.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.spec.ts
index e05ef48..323aa56 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.spec.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.spec.ts
@@ -18,7 +18,6 @@
 import {NO_ERRORS_SCHEMA} from '@angular/core';
 import {async, ComponentFixture, TestBed} from '@angular/core/testing';
 import {Http} from '@angular/http';
-import {FormControl, FormGroup} from '@angular/forms';
 import {TranslateModule, TranslateLoader} from '@ngx-translate/core';
 import {TranslateHttpLoader} from '@ngx-translate/http-loader';
 import {StoreModule} from '@ngrx/store';
@@ -83,10 +82,6 @@ describe('FilterDropdownComponent', () => {
   beforeEach(() => {
     fixture = TestBed.createComponent(FilterDropdownComponent);
     component = fixture.componentInstance;
-    component.filterName = 'f';
-    component.form = new FormGroup({
-      f: new FormControl()
-    });
     fixture.detectChanges();
   });
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.ts
index 9ebd821..8352ff1 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.ts
@@ -15,9 +15,8 @@
  * limitations under the License.
  */
 
-import {Component, Input, forwardRef} from '@angular/core';
-import {ControlValueAccessor, NG_VALUE_ACCESSOR, FormGroup} from '@angular/forms';
-import {FilteringService} from '@app/services/filtering.service';
+import {Component, forwardRef} from '@angular/core';
+import {ControlValueAccessor, NG_VALUE_ACCESSOR} from '@angular/forms';
 import {ComponentActionsService} from '@app/services/component-actions.service';
 import {UtilsService} from '@app/services/utils.service';
 import {DropdownButtonComponent} from '@app/components/dropdown-button/dropdown-button.component';
@@ -36,58 +35,18 @@ import {DropdownButtonComponent} from '@app/components/dropdown-button/dropdown-
 })
 export class FilterDropdownComponent extends DropdownButtonComponent implements ControlValueAccessor {
 
-  constructor(protected filtering: FilteringService, protected actions: ComponentActionsService, protected utils: UtilsService) {
-    super(filtering, actions, utils);
+  constructor(protected actions: ComponentActionsService, protected utils: UtilsService) {
+    super(actions, utils);
   }
 
-  ngOnInit() {
-  }
-
-  @Input()
-  form: FormGroup;
-
-  @Input()
-  filterName: string;
-
   private onChange: (fn: any) => void;
 
-  get filterInstance(): any {
-    return this.filtering.filters[this.filterName];
-  }
-
-  get label(): string {
-    return this.filterInstance.label;
-  }
-
-  get defaultValue(): any {
-    return this.filterInstance.defaultValue;
-  }
-
-  get defaultLabel(): any {
-    return this.filterInstance.defaultLabel;
-  }
-
-  get value(): any {
-    return this.filterInstance.selectedValue == null ? this.defaultValue : this.filterInstance.selectedValue;
-  }
-
   set value(newValue: any) {
-    if (this.utils.valueHasChanged(this.filterInstance.selectedValue, newValue)) {
-      this.filterInstance.selectedValue = newValue;
-      this.onChange(newValue);
-    }
-  }
-
-  get selectedLabel(): string {
-    return this.filterInstance.selectedLabel == null ? this.defaultLabel : this.filterInstance.selectedLabel;
+    this.selectedValue = newValue;
+    this.onChange(newValue);
   }
 
-  writeValue(options: any): void {
-    const value = options && options.value;
-    if (this.utils.valueHasChanged(this.filterInstance.selectedValue, value)) {
-      this.filterInstance.selectedValue = value;
-      this.filterInstance.selectedLabel = options.label;
-    }
+  writeValue() {
   }
 
   registerOnChange(callback: any): void {
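
For reference, the refactored dropdown now takes part in the reactive form purely through the ControlValueAccessor contract: the registered onChange callback pushes the selected value into the bound form control instead of going through a shared FilteringService. A minimal, self-contained sketch of that pattern follows (illustrative only; the component name, selector and template are made up, and unlike the component above this sketch also stores the value handed in through writeValue):

import {Component, forwardRef, Input} from '@angular/core';
import {ControlValueAccessor, NG_VALUE_ACCESSOR} from '@angular/forms';

@Component({
  selector: 'simple-dropdown',
  template: `
    <button *ngFor="let option of options" (click)="select(option)">{{option.label}}</button>
  `,
  providers: [{
    provide: NG_VALUE_ACCESSOR,
    useExisting: forwardRef(() => SimpleDropdownComponent),
    multi: true
  }]
})
export class SimpleDropdownComponent implements ControlValueAccessor {

  @Input()
  options: {label: string, value: any}[] = [];

  selectedValue: any;

  private onChange: (value: any) => void = () => {};

  // user picks an option: remember it and notify the bound form control
  select(option: {label: string, value: any}): void {
    this.selectedValue = option.value;
    this.onChange(option.value);
  }

  // Angular calls this when the form control value is set programmatically
  writeValue(value: any): void {
    this.selectedValue = value;
  }

  registerOnChange(callback: (value: any) => void): void {
    this.onChange = callback;
  }

  registerOnTouched(): void {
  }
}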

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.html
index d135ba5..3f00e8b 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.html
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.html
@@ -16,6 +16,6 @@
 -->
 
 <div class="input-group">
-  <span class="input-group-addon">{{filterInstance.label | translate}}</span>
-  <input type="text" class="form-control" [(ngModel)]="instantValue" (ngModelChange)="updateValue($event)">
+  <span class="input-group-addon">{{label | translate}}</span>
+  <input type="text" class="form-control" [(ngModel)]="instantValue" (ngModelChange)="updateInstantValue($event)">
 </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.spec.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.spec.ts
index a30e12a..71039ed 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.spec.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.spec.ts
@@ -18,7 +18,7 @@
 import {CUSTOM_ELEMENTS_SCHEMA} from '@angular/core';
 import {async, ComponentFixture, TestBed} from '@angular/core/testing';
 import {Http} from '@angular/http';
-import {FormsModule, FormControl, FormGroup} from '@angular/forms';
+import {FormsModule} from '@angular/forms';
 import {TranslateModule, TranslateLoader} from '@ngx-translate/core';
 import {TranslateHttpLoader} from '@ngx-translate/http-loader';
 import {StoreModule} from '@ngrx/store';
@@ -73,10 +73,6 @@ describe('FilterTextFieldComponent', () => {
   beforeEach(() => {
     fixture = TestBed.createComponent(FilterTextFieldComponent);
     component = fixture.componentInstance;
-    component.filterName = 'f';
-    component.form = new FormGroup({
-      f: new FormControl()
-    });
     fixture.detectChanges();
   });
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.ts
index fe6ea34..2b6bfea 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.ts
@@ -19,7 +19,6 @@ import {Component, Input, forwardRef} from '@angular/core';
 import {ControlValueAccessor, NG_VALUE_ACCESSOR, FormGroup} from '@angular/forms';
 import {Subject} from 'rxjs/Subject';
 import 'rxjs/add/operator/debounceTime';
-import {FilteringService} from '@app/services/filtering.service';
 import {UtilsService} from '@app/services/utils.service';
 
 @Component({
@@ -36,17 +35,16 @@ import {UtilsService} from '@app/services/utils.service';
 })
 export class FilterTextFieldComponent implements ControlValueAccessor {
 
-  constructor(private filtering: FilteringService, private utils: UtilsService) {
-    this.valueSubject.debounceTime(this.debounceInterval).subscribe(value => this.writeValue({
+  constructor(private utils: UtilsService) {
+    this.valueSubject.debounceTime(this.debounceInterval).subscribe(value => this.updateValue({
       value
     }));
   }
 
   @Input()
-  filterName: string;
+  label: string;
 
-  @Input()
-  form: FormGroup;
+  private selectedValue: string;
 
   private onChange: (fn: any) => void;
 
@@ -56,28 +54,25 @@ export class FilterTextFieldComponent implements ControlValueAccessor {
 
   private valueSubject = new Subject<string>();
 
-  get filterInstance(): any {
-    return this.filtering.filters[this.filterName];
-  }
-
   get value(): any {
-    return this.filterInstance.selectedValue;
+    return this.selectedValue;
   }
 
   set value(newValue: any) {
-    if (this.utils.valueHasChanged(this.filterInstance.selectedValue, newValue)) {
-      this.filterInstance.selectedValue = newValue;
-      this.onChange(newValue);
-    }
+    this.selectedValue = newValue;
+    this.onChange(newValue);
   }
 
-  writeValue(options: any) {
+  updateValue(options: any) {
     const value = options && options.value;
-    if (this.utils.valueHasChanged(this.filterInstance.selectedValue, value)) {
-      this.filterInstance.selectedValue = value;
+    if (this.utils.valueHasChanged(this.selectedValue, value)) {
+      this.value = value;
     }
   }
 
+  writeValue() {
+  }
+
   registerOnChange(callback: any): void {
     this.onChange = callback;
   }
@@ -85,7 +80,7 @@ export class FilterTextFieldComponent implements ControlValueAccessor {
   registerOnTouched() {
   }
 
-  updateValue(value: string): void {
+  updateInstantValue(value: string): void {
     this.valueSubject.next(value);
   }
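
The text field batches keystrokes through an RxJS Subject so the form control only updates once the user pauses typing. A stripped-down sketch of that debounce wiring, using the same RxJS 5 patch-style imports as the component (the 1500 ms interval is an assumed value for illustration; the component's actual debounceInterval is not visible in this hunk):

import {Subject} from 'rxjs/Subject';
import 'rxjs/add/operator/debounceTime';

const debounceInterval = 1500; // ms; assumed value for illustration

// stands in for the onChange callback registered by the forms API
const commitValue = (value: string): void => console.log('commit', value);

const valueSubject = new Subject<string>();
valueSubject.debounceTime(debounceInterval).subscribe(value => commitValue(value));

// each keystroke pushes the current text into the subject...
valueSubject.next('err');
valueSubject.next('erro');
valueSubject.next('error');
// ...but commitValue fires once, debounceInterval ms after the last keystroke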
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.html
index 6547d88..d2de5e2 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.html
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.html
@@ -15,14 +15,14 @@
   limitations under the License.
 -->
 
-<form [formGroup]="filtersForm">
+<form class="col-md-12" [formGroup]="filtersForm">
   <div class="form-inline filter-input-container col-md-8">
-    <filter-dropdown [(ngModel)]="filters.clusters.selectedValue" filterName="clusters" [form]="filtersForm"
-                     formControlName="clusters" [options]="filters.clusters.options"></filter-dropdown>
-    <filter-text-field [(ngModel)]="filters.text.selectedValue" filterName="text" formControlName="text"
-                       [form]="filtersForm"></filter-text-field>
-    <filter-dropdown [(ngModel)]="filters.timeRange.selectedValue" filterName="timeRange" [form]="filtersForm"
-                     formControlName="timeRange" [options]="filters.timeRange.options"></filter-dropdown>
+    <filter-dropdown [label]="filters.clusters.label" formControlName="clusters" [options]="filters.clusters.options"
+                     [defaultLabel]="filters.clusters.defaultLabel"></filter-dropdown>
+    <filter-text-field [label]="filters.text.label"
+                       formControlName="text"></filter-text-field>
+    <filter-dropdown formControlName="timeRange" [options]="filters.timeRange.options"
+                     [defaultLabel]="filters.timeRange.defaultLabel"></filter-dropdown>
     <dropdown-button [options]="timeZoneSelection.options" [defaultValue]="timeZoneSelection.defaultValue"
                      [defaultLabel]="timeZoneSelection.defaultLabel" action="setTimeZone"></dropdown-button>
     <!--button class="btn btn-success" type="button">
@@ -33,12 +33,11 @@
     <a href="#">
       <span class="fa fa-search-minus"></span> {{'filter.excluded' | translate}}
     </a>
-    <filter-button [(ngModel)]="filters.components.selectedValue" [form]="filtersForm" formControlName="components"
-                   [label]="filters.components.label" [iconClass]="filters.components.iconClass"
-                   [subItems]="filters.components.options" filterName="components"></filter-button>
-    <filter-button [(ngModel)]="filters.levels.selectedValue" [form]="filtersForm" formControlName="levels"
-                   [label]="filters.levels.label" [iconClass]="filters.levels.iconClass"
-                   [subItems]="filters.levels.options" filterName="levels"></filter-button>
+    <filter-button formControlName="components" [label]="filters.components.label"
+                   [iconClass]="filters.components.iconClass" [subItems]="filters.components.options"
+                   isRightAlign="true"></filter-button>
+    <filter-button formControlName="levels" [label]="filters.levels.label" [iconClass]="filters.levels.iconClass"
+                   [subItems]="filters.levels.options" isRightAlign="true"></filter-button>
     <menu-button label="filter.capture" iconClass="fa fa-caret-right"></menu-button>
   </div>
 </form>
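
The formControlName bindings above only work if the FormGroup returned by filtersForm exposes controls with matching names (clusters, text, timeRange, components, levels). The group itself is built in FilteringService, which is not part of this hunk; a hypothetical sketch of what such a group looks like and how its combined value is observed:

import {FormControl, FormGroup} from '@angular/forms';

// control names must line up with the formControlName bindings in the template
const filtersForm = new FormGroup({
  clusters: new FormControl(''),
  text: new FormControl(''),
  timeRange: new FormControl(''),
  components: new FormControl(''),
  levels: new FormControl('')
});

// emits an object keyed by control name whenever any filter changes,
// e.g. {clusters: 'c1', text: 'error', timeRange: '', components: '', levels: 'ERROR'}
filtersForm.valueChanges.subscribe(values => console.log(values));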

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.less
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.less b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.less
index aebd385..6b18e91 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.less
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.less
@@ -18,28 +18,34 @@
 
 @import '../variables';
 
-.filter-input-container {
-  .flex-vertical-align;
-  justify-content: flex-start;
+:host {
+  display: block;
+  padding: @filters-panel-padding;
+  background-color: @filters-panel-background-color;
 
-  .btn-success {
-    border-top-left-radius: 0;
-    border-bottom-left-radius: 0;
-  }
-
-  filter-dropdown, dropdown-button {
-    border: @input-border;
+  .filter-input-container {
+    .flex-vertical-align;
+    justify-content: flex-start;
 
-    &:not(:last-child) {
-      border-right-width: 0;
+    .btn-success {
+      border-top-left-radius: 0;
+      border-bottom-left-radius: 0;
     }
 
-    &:first-child {
-      border-radius: @button-border-radius 0 0 @button-border-radius;
-    }
+    filter-dropdown, dropdown-button {
+      border: @input-border;
+
+      &:not(:last-child) {
+        border-right-width: 0;
+      }
+
+      &:first-child {
+        border-radius: @button-border-radius 0 0 @button-border-radius;
+      }
 
-    &:last-child {
-      border-radius: 0 @button-border-radius @button-border-radius 0;
+      &:last-child {
+        border-radius: 0 @button-border-radius @button-border-radius 0;
+      }
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.ts
index 9855bdd..b145a94 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.ts
@@ -17,6 +17,7 @@
  */
 
 import {Component} from '@angular/core';
+import {FormGroup} from '@angular/forms';
 import {FilteringService} from '@app/services/filtering.service';
 import {HttpClientService} from '@app/services/http-client.service';
 import {ClustersService} from '@app/services/storage/clusters.service';
@@ -62,7 +63,7 @@ export class FiltersPanelComponent {
     });
   }
 
-  get filtersForm() {
+  get filtersForm(): FormGroup {
     return this.filtering.filtersForm;
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.html
index 5977ea2..4c84b9c 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.html
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.html
@@ -16,8 +16,8 @@
 -->
 
 <form *ngIf="(logs | async).length" [formGroup]="filtersForm">
-  <filter-dropdown [(ngModel)]="filters.sorting.selectedValue" filterName="sorting" [form]="filtersForm"
-                   formControlName="sorting" [options]="filters.sorting.options" isRightAlign="true"></filter-dropdown>
+  <filter-dropdown [label]="filters.sorting.label" formControlName="sorting" [options]="filters.sorting.options"
+                   [defaultLabel]="filters.sorting.defaultLabel" isRightAlign="true"></filter-dropdown>
 </form>
 <div class="col-md-12 text-center" *ngIf="(logs | async).length">
   <div class="logs-header">
@@ -43,3 +43,5 @@
     </div>
   </ng-template>
 </accordion-panel>
+<pagination class="col-md-12" *ngIf="(logs | async).length" [totalCount]="totalCount" [filtersForm]="filtersForm"
+            [filterInstance]="filters.pageSize" [currentCount]="(logs | async).length"></pagination>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.less
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.less b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.less
index 8ebe870..76c16a5 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.less
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.less
@@ -21,7 +21,6 @@
   display: block;
   overflow: hidden;
   padding-top: @block-margin-top;
-  background-color: @main-background-color; // TODO implement actual color
 
   .logs-header {
     // TODO get rid of magic numbers, base on actual design
@@ -32,7 +31,7 @@
     text-transform: uppercase;
   }
 
-  filter-dropdown {
+  /deep/ filter-dropdown {
     justify-content: flex-end;
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.spec.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.spec.ts
index 2c4f372..96adc8f 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.spec.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.spec.ts
@@ -17,6 +17,9 @@
 
 import {NO_ERRORS_SCHEMA} from '@angular/core';
 import {async, ComponentFixture, TestBed} from '@angular/core/testing';
+import {Http} from '@angular/http';
+import {TranslateModule, TranslateLoader} from '@ngx-translate/core';
+import {TranslateHttpLoader} from '@ngx-translate/http-loader';
 import {StoreModule} from '@ngrx/store';
 import {MomentModule} from 'angular2-moment';
 import {MomentTimezoneModule} from 'angular-moment-timezone';
@@ -31,6 +34,10 @@ import {UtilsService} from '@app/services/utils.service';
 
 import {LogsListComponent} from './logs-list.component';
 
+export function HttpLoaderFactory(http: Http) {
+  return new TranslateHttpLoader(http, 'assets/i18n/', '.json');
+}
+
 describe('LogsListComponent', () => {
   let component: LogsListComponent;
   let fixture: ComponentFixture<LogsListComponent>;
@@ -55,7 +62,12 @@ describe('LogsListComponent', () => {
           components
         }),
         MomentModule,
-        MomentTimezoneModule
+        MomentTimezoneModule,
+        TranslateModule.forRoot({
+          provide: TranslateLoader,
+          useFactory: HttpLoaderFactory,
+          deps: [Http]
+        })
       ],
       providers: [
         {

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.ts
index a070814..7427fc1 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.ts
@@ -16,6 +16,7 @@
  */
 
 import {Component, OnInit, Input} from '@angular/core';
+import {FormGroup} from '@angular/forms';
 import 'rxjs/add/operator/map';
 import {HttpClientService} from '@app/services/http-client.service';
 import {ServiceLogsService} from '@app/services/storage/service-logs.service';
@@ -41,6 +42,8 @@ export class LogsListComponent implements OnInit {
   @Input()
   private logsArrayId: string;
 
+  totalCount: number = 0;
+
   timeFormat: string = 'DD/MM/YYYY HH:mm:ss';
 
   private readonly usedFilters = {
@@ -49,7 +52,9 @@ export class LogsListComponent implements OnInit {
     timeRange: ['end_time', 'start_time'],
     components: ['component_name'],
     levels: ['level'],
-    sorting: ['sortType', 'sortBy']
+    sorting: ['sortType', 'sortBy'],
+    pageSize: ['pageSize'],
+    page: ['page']
   };
 
   logs = this.serviceLogsStorage.getAll().map(logs => logs.map(log => {
@@ -66,22 +71,26 @@ export class LogsListComponent implements OnInit {
     return this.filtering.timeZone;
   }
 
-  get filters() {
+  get filters(): any {
     return this.filtering.filters;
   }
   
-  get filtersForm() {
+  get filtersForm(): FormGroup {
     return this.filtering.filtersForm;
   }
 
   private loadLogs(): void {
     this.httpClient.get(this.logsArrayId, this.getParams()).subscribe(response => {
-      const jsonResponse = response.json(),
-        logs = jsonResponse && jsonResponse.logList;
+      const jsonResponse = response.json();
       this.serviceLogsStorage.clear();
-      if (logs) {
-        const logs = response.json().logList;
-        this.serviceLogsStorage.addInstances(logs);
+      if (jsonResponse) {
+        const logs = jsonResponse.logList,
+          count = jsonResponse.totalCount || 0;
+        if (logs) {
+          const logs = response.json().logList;
+          this.serviceLogsStorage.addInstances(logs);
+        }
+        this.totalCount = count;
       }
     });
   }
@@ -103,7 +112,7 @@ export class LogsListComponent implements OnInit {
         } else {
           value = inputValue;
         }
-        if (value) {
+        if (value != null && value !== '') {
           params[paramName] = value;
         }
       });
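
The guard change from a plain truthiness test to an explicit null/empty check matters for the new paging filters: a truthiness test would silently drop falsy-but-valid values such as page 0. A small illustration (hypothetical helper, not part of the component):

// hypothetical helper mirroring the new condition in getParams()
function shouldIncludeParam(value: any): boolean {
  // loose != null covers both null and undefined
  return value != null && value !== '';
}

shouldIncludeParam(0);         // true  - page 0 is the first page and must be sent
shouldIncludeParam(false);     // true
shouldIncludeParam('');        // false - empty filter value, omit the parameter
shouldIncludeParam(null);      // false
shouldIncludeParam(undefined); // false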

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.html
index 97ed1ce..42a8fbf 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.html
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.html
@@ -15,6 +15,7 @@
   limitations under the License.
 -->
 
+<ng-template [ngTemplateOutlet]="template"></ng-template>
 <div *ngIf="isInitialLoading" class="text-center">
   <span class="fa fa-spinner fa-spin"></span>
 </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.less
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.less b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.less
new file mode 100644
index 0000000..9736628
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.less
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+@import '../variables';
+
+:host {
+  .full-size;
+  overflow-x: hidden;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.ts
index b279cf5..b0c3943 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/main-container/main-container.component.ts
@@ -16,12 +16,13 @@
  * limitations under the License.
  */
 
-import {Component} from '@angular/core';
+import {Component, ContentChild, TemplateRef} from '@angular/core';
 import {AppStateService} from '@app/services/storage/app-state.service';
 
 @Component({
   selector: 'main-container',
-  templateUrl: './main-container.component.html'
+  templateUrl: './main-container.component.html',
+  styleUrls: ['./main-container.component.less']
 })
 export class MainContainerComponent {
 
@@ -30,6 +31,9 @@ export class MainContainerComponent {
     this.appState.getParameter('isInitialLoading').subscribe(value => this.isInitialLoading = value);
   }
 
+  @ContentChild(TemplateRef)
+  template;
+
   isAuthorized: boolean = false;
 
   isInitialLoading: boolean = false;
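
MainContainerComponent now renders whatever template the parent projects into it: @ContentChild(TemplateRef) picks up an <ng-template> passed as content, and the new ngTemplateOutlet binding in the host template stamps it out. A condensed sketch of the pattern (hypothetical component and usage, for illustration only):

import {Component, ContentChild, TemplateRef} from '@angular/core';

@Component({
  selector: 'host-container',
  template: `<ng-template [ngTemplateOutlet]="template"></ng-template>`
})
export class HostContainerComponent {
  // grabs the <ng-template> projected into the component's content
  @ContentChild(TemplateRef)
  template: TemplateRef<any>;
}

// usage in a parent template:
// <host-container>
//   <ng-template>projected content is rendered inside the container</ng-template>
// </host-container>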

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.html
index 031dec1..f18285f 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.html
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.html
@@ -22,6 +22,6 @@
   <br>
   <a *ngIf="label" (mousedown)="onMouseDown($event)" [ngClass]="labelClass" (mouseup)="onMouseUp($event)"
      (click)="$event.stopPropagation()">{{label | translate}}</a>
-  <ul data-component="dropdown-list" class="dropdown-menu" *ngIf="hasSubItems" [items]="subItems"
-      (selectedItemChange)="writeValue($event)"></ul>
+  <ul data-component="dropdown-list" *ngIf="hasSubItems" [items]="subItems" (selectedItemChange)="updateValue($event)"
+      [ngClass]="{'dropdown-menu': true, 'dropdown-menu-right': isRightAlign}"></ul>
 </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.ts
index e245fb3..d1baedc 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.ts
@@ -49,7 +49,10 @@ export class MenuButtonComponent {
   subItems?: any[];
 
   @Input()
-  hideCaret?: boolean;
+  hideCaret?: boolean = false;
+
+  @Input()
+  isRightAlign?: boolean = false;
 
   get hasSubItems(): boolean {
     return Boolean(this.subItems && this.subItems.length);
@@ -81,7 +84,7 @@ export class MenuButtonComponent {
     }
   }
   
-  writeValue(options: any) {
+  updateValue(options: any) {
     // TODO implement value change behaviour
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.html
new file mode 100644
index 0000000..c227a2b
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.html
@@ -0,0 +1,23 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<button class="btn btn-link" [disabled]="currentPage === 0" (click)="updateValue(true)">
+  <span class="pagination-control fa fa-chevron-left"></span>
+</button>
+<button class="btn btn-link" [disabled]="currentPage === pagesCount - 1" (click)="updateValue()">
+  <span class="pagination-control fa fa-chevron-right"></span>
+</button>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.less
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.less b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.less
new file mode 100644
index 0000000..c21e83e
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.less
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@import '../variables';
+
+.pagination-control {
+  .clickable-item;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.spec.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.spec.ts
new file mode 100644
index 0000000..489f79c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.spec.ts
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {async, ComponentFixture, TestBed} from '@angular/core/testing';
+
+import {PaginationControlsComponent} from './pagination-controls.component';
+
+describe('PaginationControlsComponent', () => {
+  let component: PaginationControlsComponent;
+  let fixture: ComponentFixture<PaginationControlsComponent>;
+
+  beforeEach(async(() => {
+    TestBed.configureTestingModule({
+      declarations: [PaginationControlsComponent]
+    })
+    .compileComponents();
+  }));
+
+  beforeEach(() => {
+    fixture = TestBed.createComponent(PaginationControlsComponent);
+    component = fixture.componentInstance;
+    fixture.detectChanges();
+  });
+
+  it('should create component', () => {
+    expect(component).toBeTruthy();
+  });
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.ts
new file mode 100644
index 0000000..c71844c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination-controls/pagination-controls.component.ts
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {Component, forwardRef, Input, Output, EventEmitter} from '@angular/core';
+import {ControlValueAccessor, NG_VALUE_ACCESSOR } from '@angular/forms';
+
+@Component({
+  selector: 'pagination-controls',
+  templateUrl: './pagination-controls.component.html',
+  styleUrls: ['./pagination-controls.component.less'],
+  providers: [
+    {
+      provide: NG_VALUE_ACCESSOR,
+      useExisting: forwardRef(() => PaginationControlsComponent),
+      multi: true
+    }
+  ]
+})
+export class PaginationControlsComponent implements ControlValueAccessor {
+
+  private onChange: (fn: any) => void;
+
+  currentPage: number = 0;
+
+  @Input()
+  totalCount: number;
+
+  @Input()
+  pagesCount: number;
+
+  @Output()
+  currentPageChange: EventEmitter<number> = new EventEmitter();
+
+  get value(): number {
+    return this.currentPage;
+  }
+
+  set value(newValue: number) {
+    this.currentPage = newValue;
+    this.currentPageChange.emit(newValue);
+    this.onChange(newValue);
+  }
+
+  updateValue(isDecrement?: boolean) {
+    isDecrement ? this.value-- : this.value++;
+  }
+
+  writeValue() {
+  }
+
+  registerOnChange(callback: any): void {
+    this.onChange = callback;
+  }
+
+  registerOnTouched() {
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.html
new file mode 100644
index 0000000..67fe591
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.html
@@ -0,0 +1,24 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<form class="pagination-form col-md-12" [formGroup]="filtersForm">
+  <filter-dropdown [label]="filterInstance.label" formControlName="pageSize" [options]="filterInstance.options"
+                   [defaultLabel]="filterInstance.defaultLabel" isRightAlign="true" isDropup="true"></filter-dropdown>
+  <span>{{'pagination.numbers' | translate: numbersTranslateParams}}</span>
+  <pagination-controls formControlName="page" [totalCount]="totalCount" [pagesCount]="pagesCount"
+                       (currentPageChange)="setCurrentPage($event)"></pagination-controls>
+</form>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.less
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.less b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.less
new file mode 100644
index 0000000..df8ad2d
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.less
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+@import '../variables';
+
+:host {
+  display: flex;
+
+  .pagination-form {
+    .flex-vertical-align;
+    justify-content: flex-end;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.spec.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.spec.ts
new file mode 100644
index 0000000..7a15bbc
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.spec.ts
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {async, ComponentFixture, TestBed} from '@angular/core/testing';
+import {NO_ERRORS_SCHEMA} from '@angular/core';
+import {Http} from '@angular/http';
+import {TranslateModule, TranslateLoader} from '@ngx-translate/core';
+import {TranslateHttpLoader} from '@ngx-translate/http-loader';
+
+import {PaginationComponent} from './pagination.component';
+
+export function HttpLoaderFactory(http: Http) {
+  return new TranslateHttpLoader(http, 'assets/i18n/', '.json');
+}
+
+describe('PaginationComponent', () => {
+  let component: PaginationComponent;
+  let fixture: ComponentFixture<PaginationComponent>;
+
+  beforeEach(async(() => {
+    TestBed.configureTestingModule({
+      imports: [
+        TranslateModule.forRoot({
+          provide: TranslateLoader,
+          useFactory: HttpLoaderFactory,
+          deps: [Http]
+        })
+      ],
+      declarations: [PaginationComponent],
+      schemas: [NO_ERRORS_SCHEMA]
+    })
+    .compileComponents();
+  }));
+
+  beforeEach(() => {
+    fixture = TestBed.createComponent(PaginationComponent);
+    component = fixture.componentInstance;
+    component.filterInstance = {};
+    component.filtersForm = {
+      controls: {
+        pageSize: {
+          valueChanges: {
+            subscribe: () => {}
+          }
+        }
+      }
+    };
+    fixture.detectChanges();
+  });
+
+  it('should create component', () => {
+    expect(component).toBeTruthy();
+  });
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.ts
new file mode 100644
index 0000000..d38d0d8
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/pagination/pagination.component.ts
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {Component, OnInit, Input} from '@angular/core';
+import {FormGroup} from '@angular/forms';
+
+@Component({
+  selector: 'pagination',
+  templateUrl: './pagination.component.html',
+  styleUrls: ['./pagination.component.less']
+})
+export class PaginationComponent implements OnInit {
+
+  ngOnInit() {
+    this.setPageSizeFromString(this.filterInstance.defaultValue);
+    this.filtersForm.controls.pageSize.valueChanges.subscribe(value => this.setPageSizeFromString(value));
+  }
+
+  @Input()
+  filtersForm: FormGroup;
+
+  @Input()
+  filterInstance: any;
+
+  @Input()
+  currentCount?: number;
+
+  @Input()
+  totalCount: number;
+
+  private pageSize: number = 0;
+
+  setPageSizeFromString(value: string) {
+    this.pageSize = parseInt(value, 10);
+  }
+
+  private currentPage: number = 0;
+
+  get numbersTranslateParams(): any {
+    const pageSize = this.pageSize,
+      startIndex = (this.currentPage * pageSize) + 1;
+    return {
+      startIndex,
+      endIndex: startIndex + Math.min(pageSize, this.currentCount) - 1,
+      totalCount: this.totalCount
+    }
+  }
+
+  get pagesCount(): number {
+    return Math.ceil(this.totalCount / this.pageSize);
+  }
+
+  setCurrentPage(pageNumber: number) {
+    this.currentPage = pageNumber;
+  }
+
+}
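
The getters above drive the "startIndex-endIndex of totalCount" text and the pager bounds. A hedged worked example with assumed numbers (95 total logs, page size 25, zero-based page 2):

const totalCount = 95;    // logs matching the current filters
const pageSize = 25;      // from the pageSize filter
const currentPage = 2;    // zero-based, i.e. the third page
const currentCount = 25;  // logs actually returned for this page

const pagesCount = Math.ceil(totalCount / pageSize);                 // 4
const startIndex = (currentPage * pageSize) + 1;                     // 51
const endIndex = startIndex + Math.min(pageSize, currentCount) - 1;  // 75

console.log(`${startIndex}-${endIndex} of ${totalCount} (${pagesCount} pages)`);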

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/components/variables.less
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/variables.less b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/variables.less
index c5f034c..7715876 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/variables.less
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/variables.less
@@ -29,6 +29,8 @@
 @grey-color: #666;
 @default-line-height: 1.42857143;
 @main-background-color: #ECECEC;
+@filters-panel-background-color: #FFF;
+@filters-panel-padding: 10px 0;
 @list-header-background-color: #F2F2F2;
 
 @fatal-color: #830A0A;
@@ -86,3 +88,11 @@
     color: @link-hover-color;
   }
 }
+
+.full-size {
+  position: absolute;
+  top: 0;
+  right: 0;
+  bottom: 0;
+  left: 0;
+}


[25/31] ambari git commit: AMBARI-21451 - Expected Values Like original_stack Are Missing On Downgrades (jonathanhurley)

Posted by ab...@apache.org.
AMBARI-21451 - Expected Values Like original_stack Are Missing On Downgrades (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f27f3aff
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f27f3aff
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f27f3aff

Branch: refs/heads/branch-feature-logsearch-ui
Commit: f27f3affbb4c7f49944dcefc7581ac228b103e3f
Parents: eb3d3ea
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jul 12 13:30:16 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 12 19:26:37 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_features.py       |  28 ++--
 .../controller/ActionExecutionContext.java      |  30 ++--
 .../controller/AmbariActionExecutionHelper.java |  15 +-
 .../ClusterStackVersionResourceProvider.java    |   2 +-
 .../upgrades/UpgradeUserKerberosDescriptor.java | 142 +++++++------------
 .../ambari/server/state/UpgradeContext.java     |  16 ++-
 .../SPARK/1.2.1/package/scripts/params.py       |  11 +-
 .../SPARK/1.2.1/package/scripts/setup_spark.py  |   6 +-
 .../1.2.1/package/scripts/spark_service.py      |   6 +-
 .../UpgradeUserKerberosDescriptorTest.java      |  59 ++++++--
 .../src/test/python/TestStackFeature.py         |  44 ++++--
 .../test/python/stacks/2.0.6/configs/nn_eu.json |   2 +-
 .../stacks/2.0.6/configs/nn_eu_standby.json     |   2 +-
 .../2.1/configs/hive-metastore-upgrade.json     |   2 +-
 .../python/stacks/2.2/configs/knox_upgrade.json |   2 +-
 15 files changed, 199 insertions(+), 168 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index 576c138..24201dd 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -105,7 +105,10 @@ def get_stack_feature_version(config):
 
   # something like 2.4.0.0-1234; represents the version for the command
   # (or None if this is a cluster install and it hasn't been calculated yet)
-  version = default("/commandParams/version", None)
+  # this is always guaranteed to be the correct version for the command, even in
+  # upgrade and downgrade scenarios
+  command_version = default("/commandParams/version", None)
+  command_stack = default("/commandParams/target_stack", None)
 
   # something like 2.4.0.0-1234
   # (or None if this is a cluster install and it hasn't been calculated yet)
@@ -115,13 +118,13 @@ def get_stack_feature_version(config):
   upgrade_direction = default("/commandParams/upgrade_direction", None)
 
   # start out with the value that's right 99% of the time
-  version_for_stack_feature_checks = version if version is not None else stack_version
+  version_for_stack_feature_checks = command_version if command_version is not None else stack_version
 
   # if this is not an upgrade, then we take the simple path
   if upgrade_direction is None:
     Logger.info(
-      "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2} -> {3}".format(
-        stack_version, version, current_cluster_version, version_for_stack_feature_checks))
+      "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3} -> {4}".format(
+        stack_version, current_cluster_version, command_stack, command_version, version_for_stack_feature_checks))
 
     return version_for_stack_feature_checks
 
@@ -130,15 +133,12 @@ def get_stack_feature_version(config):
   is_stop_command = _is_stop_command(config)
   if not is_stop_command:
     Logger.info(
-      "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2}, upgrade_direction={3} -> {4}".format(
-        stack_version, version, current_cluster_version, upgrade_direction,
+      "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}, Upgrade Direction={4} -> {5}".format(
+        stack_version, current_cluster_version, command_stack, command_version, upgrade_direction,
         version_for_stack_feature_checks))
 
     return version_for_stack_feature_checks
 
-  original_stack = default("/commandParams/original_stack", None)
-  target_stack = default("/commandParams/target_stack", None)
-
   # something like 2.5.0.0-5678 (or None)
   downgrade_from_version = default("/commandParams/downgrade_from_version", None)
 
@@ -154,15 +154,13 @@ def get_stack_feature_version(config):
     # UPGRADE
     if current_cluster_version is not None:
       version_for_stack_feature_checks = current_cluster_version
-    elif original_stack is not None:
-      version_for_stack_feature_checks = format_stack_version(original_stack)
     else:
-      version_for_stack_feature_checks = version if version is not None else stack_version
+      version_for_stack_feature_checks = command_version if command_version is not None else stack_version
 
   Logger.info(
-    "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2}, upgrade_direction={3}, original_stack={4}, target_stack={5}, downgrade_from_version={6}, stop_command={7} -> {8}".format(
-      stack_version, version, current_cluster_version, upgrade_direction, original_stack,
-      target_stack, downgrade_from_version, is_stop_command, version_for_stack_feature_checks))
+    "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}, Upgrade Direction={4}, stop_command={5} -> {6}".format(
+      stack_version, current_cluster_version, command_stack, command_version, upgrade_direction,
+      is_stop_command, version_for_stack_feature_checks))
 
   return version_for_stack_feature_checks
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index 34d6db9..5d71869 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@ -27,7 +27,7 @@ import org.apache.ambari.server.actionmanager.TargetHostType;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.controller.internal.RequestOperationLevel;
 import org.apache.ambari.server.controller.internal.RequestResourceFilter;
-import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 
 /**
  * The context required to create tasks and stages for a custom action
@@ -44,7 +44,7 @@ public class ActionExecutionContext {
   private String expectedComponentName;
   private boolean hostsInMaintenanceModeExcluded = true;
   private boolean allowRetry = false;
-  private StackId stackId;
+  private RepositoryVersionEntity repositoryVersion;
 
   private List<ExecutionCommandVisitor> m_visitors = new ArrayList<>();
 
@@ -175,27 +175,29 @@ public class ActionExecutionContext {
   }
 
   /**
-   * Gets the stack to use for generating stack-associated values for a command.
-   * In some cases the cluster's stack is not the correct one to use, such as
-   * when distributing a repository.
+   * Gets the stack/version to use for generating stack-associated values for a
+   * command. In some cases the cluster's stack is not the correct one to use,
+   * such as when distributing a repository.
    *
-   * @return the stackId the stack to use when generating stack-specific content
-   *         for the command.
+   * @return the repository for the stack/version to use when generating
+   *         stack-specific content for the command; may be {@code null} if
+   *         no repository was explicitly set on the context.
+   *
    */
-  public StackId getStackId() {
-    return stackId;
+  public RepositoryVersionEntity getRepositoryVersion() {
+    return repositoryVersion;
   }
 
   /**
-   * Sets the stack to use for generating stack-associated values for a command.
-   * In some cases the cluster's stack is not the correct one to use, such as
-   * when distributing a repository.
+   * Sets the stack/version to use for generating stack-associated values for a
+   * command. In some cases the cluster's stack is not the correct one to use,
+   * such as when distributing a repository.
    *
    * @param stackId
    *          the stackId to use for stack-based properties on the command.
    */
-  public void setStackId(StackId stackId) {
-    this.stackId = stackId;
+  public void setRepositoryVersion(RepositoryVersionEntity repositoryVersion) {
+    this.repositoryVersion = repositoryVersion;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 391daa9..55356c7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -539,13 +539,18 @@ public class AmbariActionExecutionHelper {
     // if the repo is null, see if any values from the context should go on the
     // host params and then return
     if (null == repositoryVersion) {
-      if (null != actionContext.getStackId()) {
-        StackId stackId = actionContext.getStackId();
+      // see if the action context has a repository set to use for the command
+      if (null != actionContext.getRepositoryVersion()) {
+        StackId stackId = actionContext.getRepositoryVersion().getStackId();
         hostLevelParams.put(STACK_NAME, stackId.getStackName());
         hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
       }
 
       return;
+    } else {
+      StackId stackId = repositoryVersion.getStackId();
+      hostLevelParams.put(STACK_NAME, stackId.getStackName());
+      hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
     }
 
     JsonObject rootJsonObject = new JsonObject();
@@ -569,11 +574,5 @@ public class AmbariActionExecutionHelper {
     }
 
     hostLevelParams.put(REPO_INFO, rootJsonObject.toString());
-
-    // set the host level params if not already set by whoever is creating this command
-    if (!hostLevelParams.containsKey(STACK_NAME) || !hostLevelParams.containsKey(STACK_VERSION)) {
-      hostLevelParams.put(STACK_NAME, repositoryVersion.getStackName());
-      hostLevelParams.put(STACK_VERSION, repositoryVersion.getStackVersion());
-    }
   }
 }
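
A rough Python sketch of the branching in the hunk above: when no repository was resolved for the command, the helper now falls back to the repository carried on the action context; when one was resolved, its stack always supplies STACK_NAME/STACK_VERSION. The dictionary keys and the (stack_name, stack_version) tuples below are illustrative.

# Rough sketch of the STACK_NAME / STACK_VERSION resolution from the hunk above.
def populate_stack_params(host_level_params, repository_version, context_repository_version):
    if repository_version is None:
        # No repo resolved for the command; fall back to whatever the
        # action context carries (which may be nothing at all).
        if context_repository_version is not None:
            stack_name, stack_version = context_repository_version
            host_level_params["stack_name"] = stack_name
            host_level_params["stack_version"] = stack_version
        return host_level_params

    # A repository was resolved -- its stack always wins.
    stack_name, stack_version = repository_version
    host_level_params["stack_name"] = stack_name
    host_level_params["stack_version"] = stack_version
    return host_level_params

# Example: distributing a repository before it is "current" on the cluster.
print(populate_stack_params({}, None, ("HDP", "2.6")))            # falls back to the context
print(populate_stack_params({}, ("HDP", "2.5"), ("HDP", "2.6")))  # resolved repo wins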

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index c4fce8a..9ecea95 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -613,7 +613,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), roleParams);
 
-    actionContext.setStackId(stackId);
+    actionContext.setRepositoryVersion(repoVersion);
     actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
 
     repoVersionHelper.addCommandRepository(actionContext, osFamily, repoVersion, repoInfo);

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
index 59690a3..78aaa77 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
@@ -22,6 +22,8 @@ import java.util.List;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentMap;
 
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
@@ -29,10 +31,10 @@ import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.dao.ArtifactDAO;
 import org.apache.ambari.server.orm.entities.ArtifactEntity;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorUpdateHelper;
@@ -48,34 +50,9 @@ import com.google.inject.Inject;
  *
  * @see org.apache.ambari.server.state.kerberos.KerberosDescriptorUpdateHelper
  */
-public class UpgradeUserKerberosDescriptor extends AbstractServerAction {
+public class UpgradeUserKerberosDescriptor extends AbstractUpgradeServerAction {
   private static final Logger LOG = LoggerFactory.getLogger(UpgradeUserKerberosDescriptor.class);
 
-  /**
-   * The upgrade direction.
-   *
-   * @see Direction
-   */
-  private static final String UPGRADE_DIRECTION_KEY = "upgrade_direction";
-
-  /**
-   * The original "current" stack of the cluster before the upgrade started.
-   * This is the same regardless of whether the current direction is
-   * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
-   *
-   * @see Direction
-   */
-  private static final String ORIGINAL_STACK_KEY = "original_stack";
-
-  /**
-   * The target upgrade stack before the upgrade started. This is the same
-   * regardless of whether the current direction is {@link Direction#UPGRADE} or
-   * {@link Direction#DOWNGRADE}.
-   *
-   * @see Direction
-   */
-  private static final String TARGET_STACK_KEY = "target_stack";
-
   private final static String KERBEROS_DESCRIPTOR_NAME = "kerberos_descriptor";
   private final static String KERBEROS_DESCRIPTOR_BACKUP_NAME = "kerberos_descriptor_backup";
 
@@ -108,70 +85,73 @@ public class UpgradeUserKerberosDescriptor extends AbstractServerAction {
     List<String> messages = new ArrayList<>();
     List<String> errorMessages = new ArrayList<>();
 
-    if (cluster != null) {
-      logMessage(messages, "Obtaining the user-defined Kerberos descriptor");
+    UpgradeContext upgradeContext = getUpgradeContext(cluster);
 
-      TreeMap<String, String> foreignKeys = new TreeMap<>();
-      foreignKeys.put("cluster", String.valueOf(cluster.getClusterId()));
+    logMessage(messages, "Obtaining the user-defined Kerberos descriptor");
 
-      ArtifactEntity entity = artifactDAO.findByNameAndForeignKeys("kerberos_descriptor", foreignKeys);
-      KerberosDescriptor userDescriptor = (entity == null) ? null : kerberosDescriptorFactory.createInstance(entity.getArtifactData());
+    TreeMap<String, String> foreignKeys = new TreeMap<>();
+    foreignKeys.put("cluster", String.valueOf(cluster.getClusterId()));
 
-      if (userDescriptor != null) {
-        StackId originalStackId = getStackIdFromCommandParams(ORIGINAL_STACK_KEY);
-        StackId targetStackId = getStackIdFromCommandParams(TARGET_STACK_KEY);
+    ArtifactEntity entity = artifactDAO.findByNameAndForeignKeys("kerberos_descriptor", foreignKeys);
+    KerberosDescriptor userDescriptor = (entity == null) ? null : kerberosDescriptorFactory.createInstance(entity.getArtifactData());
 
-        if (isDowngrade()) {
-          restoreDescriptor(foreignKeys, messages, errorMessages);
-        } else {
-          backupDescriptor(foreignKeys, messages, errorMessages);
+    if (userDescriptor != null) {
+
+      @Experimental(
+          feature = ExperimentalFeature.PATCH_UPGRADES,
+          comment = "This needs to be correctly done per-service")
+
+      StackId originalStackId = cluster.getCurrentStackVersion();
+      StackId targetStackId = upgradeContext.getRepositoryVersion().getStackId();
+
+      if (upgradeContext.getDirection() == Direction.DOWNGRADE) {
+        restoreDescriptor(foreignKeys, messages, errorMessages);
+      } else {
+        backupDescriptor(foreignKeys, messages, errorMessages);
 
-          KerberosDescriptor newDescriptor = null;
-          KerberosDescriptor previousDescriptor = null;
+        KerberosDescriptor newDescriptor = null;
+        KerberosDescriptor previousDescriptor = null;
 
-          if (targetStackId == null) {
-            logErrorMessage(messages, errorMessages, "The new stack version information was not found.");
-          } else {
-            logMessage(messages, String.format("Obtaining new stack Kerberos descriptor for %s.", targetStackId.toString()));
-            newDescriptor = ambariMetaInfo.getKerberosDescriptor(targetStackId.getStackName(), targetStackId.getStackVersion());
+        if (targetStackId == null) {
+          logErrorMessage(messages, errorMessages, "The new stack version information was not found.");
+        } else {
+          logMessage(messages, String.format("Obtaining new stack Kerberos descriptor for %s.", targetStackId.toString()));
+          newDescriptor = ambariMetaInfo.getKerberosDescriptor(targetStackId.getStackName(), targetStackId.getStackVersion());
 
-            if (newDescriptor == null) {
-              logErrorMessage(messages, errorMessages, String.format("The Kerberos descriptor for the new stack version, %s, was not found.", targetStackId.toString()));
-            }
+          if (newDescriptor == null) {
+            logErrorMessage(messages, errorMessages, String.format("The Kerberos descriptor for the new stack version, %s, was not found.", targetStackId.toString()));
           }
+        }
 
-          if (originalStackId == null) {
-            logErrorMessage(messages, errorMessages, "The previous stack version information was not found.");
-          } else {
-            logMessage(messages, String.format("Obtaining previous stack Kerberos descriptor for %s.", originalStackId.toString()));
-            previousDescriptor = ambariMetaInfo.getKerberosDescriptor(originalStackId.getStackName(), originalStackId.getStackVersion());
+        if (originalStackId == null) {
+          logErrorMessage(messages, errorMessages, "The previous stack version information was not found.");
+        } else {
+          logMessage(messages, String.format("Obtaining previous stack Kerberos descriptor for %s.", originalStackId.toString()));
+          previousDescriptor = ambariMetaInfo.getKerberosDescriptor(originalStackId.getStackName(), originalStackId.getStackVersion());
 
-            if (newDescriptor == null) {
-              logErrorMessage(messages, errorMessages, String.format("The Kerberos descriptor for the previous stack version, %s, was not found.", originalStackId.toString()));
-            }
+          if (newDescriptor == null) {
+            logErrorMessage(messages, errorMessages, String.format("The Kerberos descriptor for the previous stack version, %s, was not found.", originalStackId.toString()));
           }
+        }
 
-          if (errorMessages.isEmpty()) {
-            logMessage(messages, "Updating the user-specified Kerberos descriptor.");
+        if (errorMessages.isEmpty()) {
+          logMessage(messages, "Updating the user-specified Kerberos descriptor.");
 
-            KerberosDescriptor updatedDescriptor = KerberosDescriptorUpdateHelper.updateUserKerberosDescriptor(
-                previousDescriptor,
-                newDescriptor,
-                userDescriptor);
+          KerberosDescriptor updatedDescriptor = KerberosDescriptorUpdateHelper.updateUserKerberosDescriptor(
+              previousDescriptor,
+              newDescriptor,
+              userDescriptor);
 
-            logMessage(messages, "Storing updated user-specified Kerberos descriptor.");
+          logMessage(messages, "Storing updated user-specified Kerberos descriptor.");
 
-            entity.setArtifactData(updatedDescriptor.toMap());
-            artifactDAO.merge(entity);
+          entity.setArtifactData(updatedDescriptor.toMap());
+          artifactDAO.merge(entity);
 
-            logMessage(messages, "Successfully updated the user-specified Kerberos descriptor.");
-          }
+          logMessage(messages, "Successfully updated the user-specified Kerberos descriptor.");
         }
-      } else {
-        logMessage(messages, "A user-specified Kerberos descriptor was not found. No updates are necessary.");
       }
     } else {
-      logErrorMessage(messages, errorMessages, String.format("The cluster named %s was not found.", clusterName));
+      logMessage(messages, "A user-specified Kerberos descriptor was not found. No updates are necessary.");
     }
 
     if (!errorMessages.isEmpty()) {
@@ -181,24 +161,6 @@ public class UpgradeUserKerberosDescriptor extends AbstractServerAction {
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", StringUtils.join(messages, "\n"), StringUtils.join(errorMessages, "\n"));
   }
 
-  /**
-   * Determines if upgrade direction is {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
-   *
-   * @return {@code true} if {@link Direction#DOWNGRADE}; {@code false} if {@link Direction#UPGRADE}
-   */
-  private boolean isDowngrade() {
-    return Direction.DOWNGRADE.name().equalsIgnoreCase(getCommandParameterValue(UPGRADE_DIRECTION_KEY));
-  }
-
-  private StackId getStackIdFromCommandParams(String commandParamKey) {
-    String stackId = getCommandParameterValue(commandParamKey);
-    if (stackId == null) {
-      return null;
-    } else {
-      return new StackId(stackId);
-    }
-  }
-
   private void logMessage(List<String> messages, String message) {
     LOG.info(message);
     messages.add(message);
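
The action no longer parses upgrade_direction, original_stack, and target_stack out of the command parameters; it derives them from the cluster and the UpgradeContext. A compact Python sketch of the resulting control flow (the descriptor lookup and merge callables are stand-ins for the AmbariMetaInfo and KerberosDescriptorUpdateHelper calls):

# Compact sketch of the refactored flow above; lookup/merge/backup/restore are
# placeholders for the real calls shown in the diff.
def upgrade_user_kerberos_descriptor(user_descriptor, current_stack, upgrade_ctx,
                                     get_stack_descriptor, merge, backup, restore):
    if user_descriptor is None:
        return "no user-specified descriptor; nothing to do"

    original_stack = current_stack                           # cluster.getCurrentStackVersion()
    target_stack = upgrade_ctx["repository_version_stack"]   # upgradeContext.getRepositoryVersion().getStackId()

    if upgrade_ctx["direction"] == "DOWNGRADE":
        restore()
        return "restored the backed-up descriptor"

    backup()
    previous_descriptor = get_stack_descriptor(original_stack)
    new_descriptor = get_stack_descriptor(target_stack)
    return merge(previous_descriptor, new_descriptor, user_descriptor)

# Toy run: merge just records which stacks were consulted.
print(upgrade_user_kerberos_descriptor(
    user_descriptor={"services": {}},
    current_stack="HDP-2.4",
    upgrade_ctx={"direction": "UPGRADE", "repository_version_stack": "HDP-2.5"},
    get_stack_descriptor=lambda stack: {"stack": stack},
    merge=lambda prev, new, user: ("merged", prev["stack"], new["stack"]),
    backup=lambda: None,
    restore=lambda: None))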

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index 3ecf64d..1695bd3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -99,7 +99,13 @@ public class UpgradeContext {
   public static final String COMMAND_PARAM_TASKS = "tasks";
   public static final String COMMAND_PARAM_STRUCT_OUT = "structured_out";
 
-  /**
+  @Deprecated
+  @Experimental(
+      feature = ExperimentalFeature.PATCH_UPGRADES,
+      comment = "This isn't needed anymore, but many python classes still use it")
+  public static final String COMMAND_PARAM_DOWNGRADE_FROM_VERSION = "downgrade_from_version";
+
+  /*
    * The cluster that the upgrade is for.
    */
   final private Cluster m_cluster;
@@ -744,6 +750,7 @@ public class UpgradeContext {
    * <ul>
    * <li>{@link #COMMAND_PARAM_CLUSTER_NAME}
    * <li>{@link #COMMAND_PARAM_DIRECTION}
+   * <li>{@link #COMMAND_PARAM_DOWNGRADE_FROM_VERSION}
    * <li>{@link #COMMAND_PARAM_UPGRADE_TYPE}
    * <li>{@link KeyNames#REFRESH_CONFIG_TAGS_BEFORE_EXECUTION} - necessary in
    * order to have the commands contain the correct configurations. Otherwise,
@@ -758,8 +765,13 @@ public class UpgradeContext {
   public Map<String, String> getInitializedCommandParameters() {
     Map<String, String> parameters = new HashMap<>();
 
+    Direction direction = getDirection();
     parameters.put(COMMAND_PARAM_CLUSTER_NAME, m_cluster.getClusterName());
-    parameters.put(COMMAND_PARAM_DIRECTION, getDirection().name().toLowerCase());
+    parameters.put(COMMAND_PARAM_DIRECTION, direction.name().toLowerCase());
+
+    if (direction == Direction.DOWNGRADE) {
+      parameters.put(COMMAND_PARAM_DOWNGRADE_FROM_VERSION, m_repositoryVersion.getVersion());
+    }
 
     if (null != getType()) {
       // use the serialized attributes of the enum to convert it to a string,
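
With this change the context also emits downgrade_from_version, but only when the direction is DOWNGRADE. A small Python sketch of the two parameters touched above (other parameters omitted; key names follow the constants shown in the diff):

# Sketch of the two command parameters touched above; everything else from
# getInitializedCommandParameters() is omitted.
def initialized_command_parameters(cluster_name, direction, repository_version):
    params = {
        "clusterName": cluster_name,                 # COMMAND_PARAM_CLUSTER_NAME
        "upgrade_direction": direction.lower(),      # COMMAND_PARAM_DIRECTION
    }
    if direction == "DOWNGRADE":
        # Kept only because existing Python scripts still read this key; per the
        # test fixtures elsewhere in this commit it carries the version the
        # cluster is moving away from.
        params["downgrade_from_version"] = repository_version
    return params

print(initialized_command_parameters("c1", "UPGRADE", "2.6.0.0-334"))
print(initialized_command_parameters("c1", "DOWNGRADE", "2.5.9.9-9999"))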

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
index 74fd76a..93b4944 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
@@ -23,6 +23,7 @@ import status_params
 
 from setup_spark import *
 from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions.constants import StackFeature
 from resource_management.libraries.functions import conf_select, stack_select
 from resource_management.libraries.functions.get_stack_version import get_stack_version
@@ -56,10 +57,8 @@ upgrade_direction = default("/commandParams/upgrade_direction", None)
 java_home = config['hostLevelParams']['java_home']
 stack_name = status_params.stack_name
 stack_root = Script.get_stack_root()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-if upgrade_direction == Direction.DOWNGRADE:
-  stack_version_unformatted = config['commandParams']['original_stack'].split("-")[1]
-stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+version_for_stack_feature_checks = get_stack_feature_version(config)
 
 sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()
 
@@ -70,7 +69,7 @@ spark_conf = '/etc/spark/conf'
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 
-if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+if check_stack_feature(StackFeature.ROLLING_UPGRADE, version_for_stack_feature_checks):
   hadoop_home = stack_select.get_hadoop_dir("home")
   spark_conf = format("{stack_root}/current/{component_directory}/conf")
   spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
@@ -211,7 +210,7 @@ dfs_type = default("/commandParams/dfs_type", "")
 # livy is only supported from HDP 2.5
 has_livyserver = False
 
-if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted) and "livy-env" in config['configurations']:
+if check_stack_feature(StackFeature.SPARK_LIVY, version_for_stack_feature_checks) and "livy-env" in config['configurations']:
   livy_component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "LIVY_SERVER")
   livy_conf = format("{stack_root}/current/{livy_component_directory}/conf")
   livy_log_dir = config['configurations']['livy-env']['livy_log_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
index 50c1555..53c8f9e 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
@@ -118,11 +118,11 @@ def setup_spark(env, type, upgrade_type=None, action=None, config_dir=None):
       mode=0644
     )
 
-  effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
+  effective_version = params.version if upgrade_type is not None else params.version_for_stack_feature_checks
   if effective_version:
     effective_version = format_stack_version(effective_version)
 
-  if effective_version and check_stack_feature(StackFeature.SPARK_JAVA_OPTS_SUPPORT, effective_version):
+  if check_stack_feature(StackFeature.SPARK_JAVA_OPTS_SUPPORT, effective_version):
     File(os.path.join(params.spark_conf, 'java-opts'),
       owner=params.spark_user,
       group=params.spark_group,
@@ -134,7 +134,7 @@ def setup_spark(env, type, upgrade_type=None, action=None, config_dir=None):
       action="delete"
     )
 
-  if params.spark_thrift_fairscheduler_content and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
+  if params.spark_thrift_fairscheduler_content and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
     # create spark-thrift-fairscheduler.xml
     File(os.path.join(config_dir,"spark-thrift-fairscheduler.xml"),
       owner=params.spark_user,
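
The effective version used for feature checks now falls back to version_for_stack_feature_checks rather than the raw formatted stack version, and check_stack_feature is called unconditionally. A self-contained sketch of that selection; the check_stack_feature below is a toy stand-in for the one in resource_management.libraries.functions.stack_features:

# Stand-alone sketch of the "effective version" selection used above.
def pick_effective_version(command_version, upgrade_type, version_for_stack_feature_checks):
    # During an upgrade the command carries the version being moved to;
    # otherwise fall back to the centrally computed feature-check version.
    return command_version if upgrade_type is not None else version_for_stack_feature_checks

def check_stack_feature(min_version, effective_version):
    # Toy comparison on the major.minor prefix, e.g. "2.3" vs "2.5.0.0-1234".
    def key(v):
        return [int(p) for p in v.split("-")[0].split(".")[:2]]
    return effective_version is not None and key(effective_version) >= key(min_version)

print(check_stack_feature("2.3", pick_effective_version("2.6.0.0-334", "rolling_upgrade", "2.5.0.0-1237")))
print(check_stack_feature("2.6", pick_effective_version(None, None, "2.5.0.0-1237")))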

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
index 31a296a..2838186 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
@@ -34,11 +34,11 @@ def spark_service(name, upgrade_type=None, action=None):
 
   if action == 'start':
 
-    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
+    effective_version = params.version if upgrade_type is not None else params.version_for_stack_feature_checks
     if effective_version:
       effective_version = format_stack_version(effective_version)
 
-    if name == 'jobhistoryserver' and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
+    if name == 'jobhistoryserver' and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
       # copy spark-hdp-assembly.jar to hdfs
       copy_to_hdfs("spark", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
       # create spark history directory
@@ -58,7 +58,7 @@ def spark_service(name, upgrade_type=None, action=None):
 
     # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
     # need to copy the tarball, otherwise, copy it.
-    if params.stack_version_formatted and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.stack_version_formatted):
+    if check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.version_for_stack_feature_checks):
       resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
       if resource_created:
         params.HdfsResource(None, action="execute")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
index ed92955..86f6d3b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
@@ -36,11 +36,17 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.dao.ArtifactDAO;
 import org.apache.ambari.server.orm.entities.ArtifactEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
+import org.apache.ambari.server.state.UpgradeContextFactory;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorUpdateHelper;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
 import org.junit.Before;
@@ -58,23 +64,34 @@ import org.powermock.modules.junit4.PowerMockRunner;
 public class UpgradeUserKerberosDescriptorTest {
   private Clusters clusters;
   private Cluster cluster;
+  private UpgradeEntity upgrade;
+  private UpgradeContext upgradeContext;
   private AmbariMetaInfo ambariMetaInfo;
   private KerberosDescriptorFactory kerberosDescriptorFactory;
   private ArtifactDAO artifactDAO;
+  private UpgradeContextFactory upgradeContextFactory;
 
   private TreeMap<String, Field> fields = new TreeMap<>();
+  private StackId HDP_24 = new StackId("HDP", "2.4");
 
   @Before
   public void setup() throws Exception {
     clusters = EasyMock.createMock(Clusters.class);
     cluster = EasyMock.createMock(Cluster.class);
+    upgrade = EasyMock.createNiceMock(UpgradeEntity.class);
     kerberosDescriptorFactory = EasyMock.createNiceMock(KerberosDescriptorFactory.class);
     ambariMetaInfo = EasyMock.createMock(AmbariMetaInfo.class);
     artifactDAO = EasyMock.createNiceMock(ArtifactDAO.class);
+    upgradeContextFactory = EasyMock.createNiceMock(UpgradeContextFactory.class);
+    upgradeContext = EasyMock.createNiceMock(UpgradeContext.class);
 
     expect(clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(cluster.getClusterId()).andReturn(1l).atLeastOnce();
-    replay(clusters, cluster);
+    expect(cluster.getCurrentStackVersion()).andReturn(HDP_24).atLeastOnce();
+    expect(cluster.getUpgradeInProgress()).andReturn(upgrade).atLeastOnce();
+    expect(upgradeContextFactory.create(cluster, upgrade)).andReturn(upgradeContext).atLeastOnce();
+
+    replay(clusters, cluster, upgradeContextFactory, upgrade);
 
     prepareFields();
 
@@ -82,12 +99,16 @@ public class UpgradeUserKerberosDescriptorTest {
 
   @Test
   public void testUpgrade() throws Exception {
+    StackId stackId = new StackId("HDP", "2.5");
+    RepositoryVersionEntity repositoryVersion = EasyMock.createNiceMock(RepositoryVersionEntity.class);
+    expect(repositoryVersion.getStackId()).andReturn(stackId).atLeastOnce();
+
+    expect(upgradeContext.getDirection()).andReturn(Direction.UPGRADE).atLeastOnce();
+    expect(upgradeContext.getRepositoryVersion()).andReturn(repositoryVersion).atLeastOnce();
+    replay(repositoryVersion, upgradeContext);
 
     Map<String, String> commandParams = new HashMap<>();
     commandParams.put("clusterName", "c1");
-    commandParams.put("upgrade_direction", "UPGRADE");
-    commandParams.put("original_stack", "HDP-2.4");
-    commandParams.put("target_stack", "HDP-2.5");
 
     ExecutionCommand executionCommand = new ExecutionCommand();
     executionCommand.setCommandParams(commandParams);
@@ -140,12 +161,16 @@ public class UpgradeUserKerberosDescriptorTest {
 
   @Test
   public void testDowngrade() throws Exception {
+    StackId stackId = new StackId("HDP", "2.5");
+    RepositoryVersionEntity repositoryVersion = EasyMock.createNiceMock(RepositoryVersionEntity.class);
+    expect(repositoryVersion.getStackId()).andReturn(stackId).atLeastOnce();
+
+    expect(upgradeContext.getDirection()).andReturn(Direction.DOWNGRADE).atLeastOnce();
+    expect(upgradeContext.getRepositoryVersion()).andReturn(repositoryVersion).atLeastOnce();
+    replay(repositoryVersion, upgradeContext);
 
     Map<String, String> commandParams = new HashMap<>();
     commandParams.put("clusterName", "c1");
-    commandParams.put("upgrade_direction", "DOWNGRADE");
-    commandParams.put("original_stack", "HDP-2.4");
-    commandParams.put("target_stack", "HDP-2.5");
 
     ExecutionCommand executionCommand = new ExecutionCommand();
     executionCommand.setCommandParams(commandParams);
@@ -188,12 +213,19 @@ public class UpgradeUserKerberosDescriptorTest {
   }
 
   private void prepareFields() throws NoSuchFieldException {
-    String[] fieldsNames = {"artifactDAO","clusters","ambariMetaInfo","kerberosDescriptorFactory"};
-    for(String fieldName : fieldsNames)
-    {
-      Field clustersField = UpgradeUserKerberosDescriptor.class.getDeclaredField(fieldName);
-      clustersField.setAccessible(true);
-      fields.put(fieldName, clustersField);
+    String[] fieldsNames = { "artifactDAO", "clusters", "ambariMetaInfo",
+        "kerberosDescriptorFactory", "m_upgradeContextFactory" };
+
+    for (String fieldName : fieldsNames) {
+      try {
+        Field clustersField = UpgradeUserKerberosDescriptor.class.getDeclaredField(fieldName);
+        clustersField.setAccessible(true);
+        fields.put(fieldName, clustersField);
+      } catch( NoSuchFieldException noSuchFieldException ){
+        Field clustersField = UpgradeUserKerberosDescriptor.class.getSuperclass().getDeclaredField(fieldName);
+        clustersField.setAccessible(true);
+        fields.put(fieldName, clustersField);        
+      }
     }
   }
   private void injectFields(UpgradeUserKerberosDescriptor action) throws IllegalAccessException {
@@ -201,5 +233,6 @@ public class UpgradeUserKerberosDescriptorTest {
     fields.get("clusters").set(action, clusters);
     fields.get("ambariMetaInfo").set(action, ambariMetaInfo);
     fields.get("kerberosDescriptorFactory").set(action, kerberosDescriptorFactory);
+    fields.get("m_upgradeContextFactory").set(action, upgradeContextFactory);
   }
 }
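
The test now drives its expectations through a mocked UpgradeContext instead of string command parameters. A rough Python analog of the EasyMock setup above, using unittest.mock and only the calls visible in the hunks (method names are pythonified stand-ins):

# Rough Python analog of the EasyMock setup above.
from unittest import mock

repository_version = mock.Mock()
repository_version.get_stack_id.return_value = ("HDP", "2.5")

upgrade_context = mock.Mock()
upgrade_context.get_direction.return_value = "UPGRADE"
upgrade_context.get_repository_version.return_value = repository_version

# Command parameters now only need the cluster name; the direction and stack
# pairs that used to be strings here come from the mocked context instead.
command_params = {"clusterName": "c1"}

assert upgrade_context.get_repository_version().get_stack_id() == ("HDP", "2.5")
assert "original_stack" not in command_params and "target_stack" not in command_params
print("mocked upgrade context wired up")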

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/TestStackFeature.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestStackFeature.py b/ambari-server/src/test/python/TestStackFeature.py
index 0116a7a..230734c 100644
--- a/ambari-server/src/test/python/TestStackFeature.py
+++ b/ambari-server/src/test/python/TestStackFeature.py
@@ -28,6 +28,32 @@ from unittest import TestCase
 Logger.initialize_logger()
 
 class TestStackFeature(TestCase):
+  """
+  EU Upgrade (HDP 2.5 to HDP 2.6)
+    - STOP
+      hostLevelParams/stack_name = HDP
+      hostLevelParams/stack_version = 2.5
+      hostLevelParams/current_version = 2.5.0.0-1237
+      commandParams/version = 2.5.0.0-1237
+    - START
+      hostLevelParams/stack_name = HDP
+      hostLevelParams/stack_version = 2.6
+      hostLevelParams/current_version = 2.5.0.0-1237
+      commandParams/version = 2.6.0.0-334
+
+  EU Downgrade (HDP 2.6 to HDP 2.5)
+    - STOP
+      hostLevelParams/stack_name = HDP
+      hostLevelParams/stack_version = 2.6
+      hostLevelParams/current_version = 2.5.0.0-1237
+      commandParams/version = 2.6.0.0-334
+    - START
+      hostLevelParams/stack_name = HDP
+      hostLevelParams/stack_version = 2.5
+      hostLevelParams/current_version = 2.5.0.0-1237
+      commandParams/version = 2.5.0.0-1237

+  """
+
   def test_get_stack_feature_version_missing_params(self):
     try:
       stack_feature_version = get_stack_feature_version({})
@@ -122,7 +148,7 @@ class TestStackFeature(TestCase):
         "current_version":  "2.4.0.0-1234"
       },
       "commandParams": {
-        "original_stack": "2.4",
+        "source_stack": "2.4",
         "target_stack": "2.5",
         "upgrade_direction": "upgrade",
         "version": "2.5.9.9-9999"
@@ -143,8 +169,8 @@ class TestStackFeature(TestCase):
         "current_version":"2.4.0.0-1234"
       },
       "commandParams":{
-        "original_stack":"2.4",
-        "target_stack":"2.5",
+        "source_stack":"2.5",
+        "target_stack":"2.4",
         "upgrade_direction":"downgrade",
         "version":"2.4.0.0-1234",
         "downgrade_from_version": "2.5.9.9-9999"
@@ -166,10 +192,10 @@ class TestStackFeature(TestCase):
         "current_version":"2.4.0.0-1234"
       },
       "commandParams":{
-        "original_stack":"2.4",
-        "target_stack":"2.5",
+        "source_stack":"2.5",
+        "target_stack":"2.4",
         "upgrade_direction":"downgrade",
-        "version":"2.4.0.0-1234",
+        "version":"2.5.9.9-9999",
         "downgrade_from_version":"2.5.9.9-9999"
       }
     }
@@ -189,10 +215,10 @@ class TestStackFeature(TestCase):
         "custom_command":"STOP"
       },
       "commandParams":{
-        "original_stack":"2.4",
-        "target_stack":"2.5",
+        "source_stack":"2.5",
+        "target_stack":"2.4",
         "upgrade_direction":"downgrade",
-        "version":"2.4.0.0-1234",
+        "version":"2.5.9.9-9999",
         "downgrade_from_version":"2.5.9.9-9999"
       }
     }
\ No newline at end of file
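
The source_stack key introduced here also replaces original_stack in the upgrade command JSON fixtures below. If older command JSON still carries the pre-rename key, a small compatibility shim like the following (purely illustrative, not part of Ambari) keeps both spellings readable:

# Illustrative compatibility shim for the original_stack -> source_stack rename.
def get_source_stack(command_params):
    if "source_stack" in command_params:
        return command_params["source_stack"]
    # older fixtures / agents may still send the pre-rename key
    return command_params.get("original_stack")

old_style = {"original_stack": "HDP-2.2", "target_stack": "HDP-2.3"}
new_style = {"source_stack": "HDP-2.2", "target_stack": "HDP-2.3"}
print(get_source_stack(old_style), get_source_stack(new_style))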

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
index 7f77d83..3aadf2c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
@@ -25,7 +25,7 @@
         "upgrade_type": "nonrolling_upgrade",
         "version": "2.3.2.0-2844", 
         "forceRefreshConfigTagsBeforeExecution": "*", 
-        "original_stack": "HDP-2.2", 
+        "source_stack": "HDP-2.2",
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 
         "desired_namenode_role": "standby", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
index 87b18af..2d48ff6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
@@ -25,7 +25,7 @@
         "upgrade_type": "nonrolling_upgrade",
         "version": "2.3.2.0-2844", 
         "forceRefreshConfigTagsBeforeExecution": "*", 
-        "original_stack": "HDP-2.2", 
+        "source_stack": "HDP-2.2",
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 
         "desired_namenode_role": "standby", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
index 99fcba0..021695b 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
@@ -13,7 +13,7 @@
         "upgrade_type": "nonrolling_upgrade",
         "version": "2.3.2.0-2950", 
         "forceRefreshConfigTagsBeforeExecution": "*", 
-        "original_stack": "HDP-2.3", 
+        "source_stack": "HDP-2.3",
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 
         "script_type": "PYTHON"

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
index a9db11c..1805c3b 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
@@ -22,7 +22,7 @@
         "upgrade_type": "rolling_upgrade",
         "command_retry_max_attempt_count": "3", 
         "version": "2.3.0.0-2096", 
-        "original_stack": "HDP-2.3", 
+        "source_stack": "HDP-2.3",
         "command_retry_enabled": "false", 
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 


[16/31] ambari git commit: AMBARI-21427. Assigning hosts concurrently to the same config group may fail with "org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist". (stoader)

Posted by ab...@apache.org.
AMBARI-21427. Assigning hosts concurrently to the same config group may fail with "org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist". (stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/639f4523
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/639f4523
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/639f4523

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 639f4523fdf49c8e0dddf79074cdb7eb4e43940c
Parents: 70cf77e
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Tue Jul 11 00:55:59 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Tue Jul 11 00:55:59 2017 +0300

----------------------------------------------------------------------
 .../ambari/server/topology/AmbariContext.java   | 81 +++++++++++++++-----
 1 file changed, 62 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/639f4523/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 106d7c8..dee0e6c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -30,6 +30,7 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
 
 import javax.annotation.Nullable;
 import javax.inject.Inject;
@@ -69,9 +70,11 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.utils.RetryHelper;
 import org.slf4j.Logger;
@@ -79,6 +82,8 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Striped;
+import com.google.inject.Provider;
 
 
 /**
@@ -99,6 +104,12 @@ public class AmbariContext {
   @Inject
   ConfigFactory configFactory;
 
+  /**
+   * Used for getting configuration property values from stack and services.
+   */
+  @Inject
+  private Provider<ConfigHelper> configHelper;
+
   private static AmbariManagementController controller;
   private static ClusterController clusterController;
   //todo: task id's.  Use existing mechanism for getting next task id sequence
@@ -112,6 +123,16 @@ public class AmbariContext {
 
   private final static Logger LOG = LoggerFactory.getLogger(AmbariContext.class);
 
+
+  /**
+   * When config groups are created using Blueprints these are created when
+   * hosts join a hostgroup and are added to the corresponding config group.
+   * Since hosts join in parallel there might be a race condition in creating
+   * the config group a host is to be added to. Thus we need to synchronize
+   * the creation of config groups with the same name.
+   */
+  private Striped<Lock> configGroupCreateLock = Striped.lazyWeakLock(1);
+
   public boolean isClusterKerberosEnabled(long clusterId) {
     Cluster cluster;
     try {
@@ -167,9 +188,10 @@ public class AmbariContext {
 
   public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType, String repoVersion) {
     Stack stack = topology.getBlueprint().getStack();
+    StackId stackId = new StackId(stack.getName(), stack.getVersion());
 
     createAmbariClusterResource(clusterName, stack.getName(), stack.getVersion(), securityType, repoVersion);
-    createAmbariServiceAndComponentResources(topology, clusterName);
+    createAmbariServiceAndComponentResources(topology, clusterName, stackId, repoVersion);
   }
 
   public void createAmbariClusterResource(String clusterName, String stackName, String stackVersion, SecurityType securityType, String repoVersion) {
@@ -196,7 +218,8 @@ public class AmbariContext {
     }
   }
 
-  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName) {
+  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName,
+      StackId stackId, String repositoryVersion) {
     Collection<String> services = topology.getBlueprint().getServices();
 
     try {
@@ -205,11 +228,13 @@ public class AmbariContext {
     } catch (AmbariException e) {
       throw new RuntimeException("Failed to persist service and component resources: " + e, e);
     }
-    Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
-    Set<ServiceComponentRequest> componentRequests = new HashSet<ServiceComponentRequest>();
+    Set<ServiceRequest> serviceRequests = new HashSet<>();
+    Set<ServiceComponentRequest> componentRequests = new HashSet<>();
     for (String service : services) {
       String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service);
-      serviceRequests.add(new ServiceRequest(clusterName, service, null, credentialStoreEnabled));
+      serviceRequests.add(new ServiceRequest(clusterName, service, stackId.getStackId(),
+          repositoryVersion, null, credentialStoreEnabled));
+
       for (String component : topology.getBlueprint().getComponents(service)) {
         String recoveryEnabled = topology.getBlueprint().getRecoveryEnabled(service, component);
         componentRequests.add(new ServiceComponentRequest(clusterName, service, component, null, recoveryEnabled));
@@ -223,14 +248,14 @@ public class AmbariContext {
     }
     // set all services state to INSTALLED->STARTED
     // this is required so the user can start failed services at the service level
-    Map<String, Object> installProps = new HashMap<String, Object>();
+    Map<String, Object> installProps = new HashMap<>();
     installProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "INSTALLED");
     installProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    Map<String, Object> startProps = new HashMap<String, Object>();
+    Map<String, Object> startProps = new HashMap<>();
     startProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "STARTED");
     startProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    Predicate predicate = new EqualsPredicate<String>(
-        ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
+    Predicate predicate = new EqualsPredicate<>(
+      ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
     try {
       getServiceResourceProvider().updateResources(
           new RequestImpl(null, Collections.singleton(installProps), null, null), predicate);
@@ -262,9 +287,9 @@ public class AmbariContext {
     }
     String clusterName = cluster.getClusterName();
 
-    Map<String, Object> properties = new HashMap<String, Object>();
+    Map<String, Object> properties = new HashMap<>();
     properties.put(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    properties.put(HostResourceProvider.HOST_NAME_PROPERTY_ID, hostName);
+    properties.put(HostResourceProvider.HOST_HOST_NAME_PROPERTY_ID, hostName);
     properties.put(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID, host.getRackInfo());
 
     try {
@@ -275,7 +300,7 @@ public class AmbariContext {
           hostName, e.toString()), e);
     }
 
-    final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
+    final Set<ServiceComponentHostRequest> requests = new HashSet<>();
 
     for (Map.Entry<String, Collection<String>> entry : components.entrySet()) {
       String service = entry.getKey();
@@ -328,11 +353,17 @@ public class AmbariContext {
   }
 
   public void registerHostWithConfigGroup(final String hostName, final ClusterTopology topology, final String groupName) {
+    String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
+
+    Lock configGroupLock = configGroupCreateLock.get(qualifiedGroupName);
+
     try {
+      configGroupLock.lock();
+
       boolean hostAdded = RetryHelper.executeWithRetry(new Callable<Boolean>() {
         @Override
         public Boolean call() throws Exception {
-          return addHostToExistingConfigGroups(hostName, topology, groupName);
+          return addHostToExistingConfigGroups(hostName, topology, qualifiedGroupName);
         }
       });
       if (!hostAdded) {
@@ -342,6 +373,9 @@ public class AmbariContext {
       LOG.error("Unable to register config group for host: ", e);
       throw new RuntimeException("Unable to register config group for host: " + hostName);
     }
+    finally {
+      configGroupLock.unlock();
+    }
   }
 
   public RequestStatusResponse installHost(String hostName, String clusterName, Collection<String> skipInstallForComponents, Collection<String> dontSkipInstallForComponents, boolean skipFailure) {
@@ -549,7 +583,7 @@ public class AmbariContext {
   /**
    * Add the new host to an existing config group.
    */
-  private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String groupName) {
+  private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String configGroupName) {
     boolean addedHost = false;
     Clusters clusters;
     Cluster cluster;
@@ -563,9 +597,8 @@ public class AmbariContext {
     // I don't know of a method to get config group by name
     //todo: add a method to get config group by name
     Map<Long, ConfigGroup> configGroups = cluster.getConfigGroups();
-    String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
     for (ConfigGroup group : configGroups.values()) {
-      if (group.getName().equals(qualifiedGroupName)) {
+      if (group.getName().equals(configGroupName)) {
         try {
           Host host = clusters.getHost(hostName);
           addedHost = true;
@@ -589,7 +622,7 @@ public class AmbariContext {
    * and the hosts associated with the host group are assigned to the config group.
    */
   private void createConfigGroupsAndRegisterHost(ClusterTopology topology, String groupName) throws AmbariException {
-    Map<String, Map<String, Config>> groupConfigs = new HashMap<String, Map<String, Config>>();
+    Map<String, Map<String, Config>> groupConfigs = new HashMap<>();
     Stack stack = topology.getBlueprint().getStack();
 
     // get the host-group config with cluster creation template overrides
@@ -608,7 +641,7 @@ public class AmbariContext {
       //todo: attributes
       Map<String, Config> serviceConfigs = groupConfigs.get(service);
       if (serviceConfigs == null) {
-        serviceConfigs = new HashMap<String, Config>();
+        serviceConfigs = new HashMap<>();
         groupConfigs.put(service, serviceConfigs);
       }
       serviceConfigs.put(type, config);
@@ -669,6 +702,16 @@ public class AmbariContext {
     return String.format("%s:%s", bpName, hostGroupName);
   }
 
+  /**
+   * Gets an instance of {@link ConfigHelper} for classes which are not
+   * dependency injected.
+   *
+   * @return a {@link ConfigHelper} instance.
+   */
+  public ConfigHelper getConfigHelper() {
+    return configHelper.get();
+  }
+
   private synchronized HostResourceProvider getHostResourceProvider() {
     if (hostResourceProvider == null) {
       hostResourceProvider = (HostResourceProvider)
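
The fix serializes config-group creation per qualified group name using Guava's Striped<Lock>. A small Python analog of the same per-key locking pattern (threading locks keyed by the same "blueprint:hostgroup" name format; the cluster-state dict is a stand-in for the real config group resources):

# Python analog of the per-group-name locking added above.
import threading

_registry_lock = threading.Lock()
_group_locks = {}          # qualified group name -> Lock
_config_groups = {}        # qualified group name -> {"hosts": set(...)}

def _lock_for(qualified_name):
    with _registry_lock:
        return _group_locks.setdefault(qualified_name, threading.Lock())

def register_host_with_config_group(host_name, blueprint_name, group_name):
    qualified_name = "%s:%s" % (blueprint_name, group_name)   # same "bp:hostgroup" format
    with _lock_for(qualified_name):
        # Under the lock, "look up, create if missing, then add the host" can no
        # longer race with another host joining the same host group.
        group = _config_groups.setdefault(qualified_name, {"hosts": set()})
        group["hosts"].add(host_name)

threads = [threading.Thread(target=register_host_with_config_group,
                            args=("host%d" % i, "bp", "workers")) for i in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(sorted(_config_groups["bp:workers"]["hosts"]))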


[24/31] ambari git commit: AMBARI-21445. Fixes the following bugs : (1). Make Hive Kerberos keytab files group non-readable (2). HiveServer2 Authentication via LDAP to work correctly (3). Remove leading white spaces for the hive-env and hive-interactive-

Posted by ab...@apache.org.
AMBARI-21445. Fixes the following bugs: (1) Make Hive Kerberos keytab files group non-readable. (2) Make HiveServer2 authentication via LDAP work correctly. (3) Remove leading white space from the hive-env and hive-interactive-env templates.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/eb3d3ea6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/eb3d3ea6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/eb3d3ea6

Branch: refs/heads/branch-feature-logsearch-ui
Commit: eb3d3ea6e5eb9464a135f851658d4aa5b3988efa
Parents: 9f788c3
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Tue Jul 11 15:37:08 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Wed Jul 12 11:55:44 2017 -0700

----------------------------------------------------------------------
 .../0.12.0.2.0/package/scripts/params_linux.py  |   4 +
 .../0.12.0.2.0/package/scripts/service_check.py |   3 +-
 .../services/HIVE/configuration/hive-env.xml    |  78 +++++-----
 .../HIVE/configuration/hive-interactive-env.xml |  62 ++++----
 .../stacks/HDP/2.6/services/HIVE/kerberos.json  | 151 +++++++++++++++++++
 .../stacks/HDP/2.6/services/YARN/kerberos.json  |   2 +-
 6 files changed, 228 insertions(+), 72 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 21b3d8b..9939536 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -849,3 +849,7 @@ ranger_hive_metastore_lookup = default('/configurations/ranger-hive-plugin-prope
 
 if security_enabled:
   hive_metastore_principal_with_host = hive_metastore_principal.replace('_HOST', hostname.lower())
+
+# For ldap - hive_check
+hive_ldap_user= config['configurations']['hive-env'].get('alert_ldap_username','')
+hive_ldap_passwd=config['configurations']['hive-env'].get('alert_ldap_password','')
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
index d144c34..271fff9 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
@@ -123,7 +123,8 @@ class HiveServiceCheckDefault(HiveServiceCheck):
                                params.hive_server_principal, kinit_cmd, params.smokeuser,
                                transport_mode=params.hive_transport_mode, http_endpoint=params.hive_http_endpoint,
                                ssl=params.hive_ssl, ssl_keystore=ssl_keystore,
-                               ssl_password=ssl_password)
+                               ssl_password=ssl_password, ldap_username=params.hive_ldap_user,
+                               ldap_password=params.hive_ldap_passwd)
         Logger.info("Successfully connected to {0} on port {1}".format(address, server_port))
         workable_server_available = True
       except:
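
The service check now forwards optional LDAP credentials that params_linux.py reads with empty-string defaults. A tiny sketch of that wiring; run_hive_check is a stub standing in for the real hive_check call:

# Sketch of the optional-credential wiring from the two hunks above.
def read_ldap_credentials(config):
    hive_env = config.get("configurations", {}).get("hive-env", {})
    # Empty-string defaults, matching params_linux.py when the properties are absent.
    return (hive_env.get("alert_ldap_username", ""),
            hive_env.get("alert_ldap_password", ""))

def run_hive_check(address, port, ldap_username="", ldap_password="", **kwargs):
    auth = "ldap" if ldap_username else "kerberos/none"
    return "checking %s:%s (auth: %s)" % (address, port, auth)

user, password = read_ldap_credentials(
    {"configurations": {"hive-env": {"alert_ldap_username": "hive_alert",
                                     "alert_ldap_password": "secret"}}})
print(run_hive_check("c6401.ambari.apache.org", 10000,
                     ldap_username=user, ldap_password=password))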

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
index a6cf1bc..929c10d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
@@ -60,56 +60,56 @@
     <display-name>hive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
+if [ "$SERVICE" = "cli" ]; then
+  if [ -z "$DEBUG" ]; then
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
+  else
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+  fi
+fi
 
-      # The heap size of the jvm stared by hive shell script can be controlled via:
+# The heap size of the jvm stared by hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
+export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
 
-      # Folder containing extra libraries required for hive compilation/execution can be controlled by:
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
-          export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-        elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-          export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-        fi
-      elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-      fi
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
+    export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+  elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+    export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+  fi
+elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
+export METASTORE_PORT={{hive_metastore_port}}
 
-      {% if sqla_db_used or lib_dir_available %}
-      export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      {% endif %}
+{% if sqla_db_used or lib_dir_available %}
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
+export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
index ada4859..86720f4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
@@ -100,47 +100,47 @@
     <display-name>hive-interactive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+if [ "$SERVICE" = "cli" ]; then
+  if [ -z "$DEBUG" ]; then
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+  else
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+  fi
+fi
 
-      # The heap size of the jvm stared by hive shell script can be controlled via:
+# The heap size of the jvm stared by hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+  export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
 
-      # Add additional hcatalog jars
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-      else
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
-      fi
+# Add additional hcatalog jars
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+else
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
+export METASTORE_PORT={{hive_metastore_port}}
 
-      # Spark assembly contains a conflicting copy of HiveConf from hive-1.2
-      export HIVE_SKIP_SPARK_ASSEMBLY=true
+# Spark assembly contains a conflicting copy of HiveConf from hive-1.2
+export HIVE_SKIP_SPARK_ASSEMBLY=true
 
     </value>
     <value-attributes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
new file mode 100644
index 0000000..b6e57e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
@@ -0,0 +1,151 @@
+{
+  "services": [
+    {
+      "name": "HIVE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hive-site": {
+            "hive.metastore.sasl.enabled": "true",
+            "hive.server2.authentication": "KERBEROS"
+          }
+        },
+        {
+          "ranger-hive-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HIVE_METASTORE",
+          "identities": [
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-site/hive.metastore.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.metastore.kerberos.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "hive_server_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type": "service",
+                "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
+                "local_username": "${hive-env/hive_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.service.keytab",
+                "owner": {
+                  "name": "${hive-env/hive_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hive-site/hive.server2.authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "atlas_kafka",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
+              },
+              "keytab": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.keytab"
+              }
+            },
+            {
+              "name": "ranger_audit",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER_INTERACTIVE",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive"
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/spnego"
+            },
+            {
+              "name": "/YARN/NODEMANAGER/llap_zk_hive"
+            }
+          ]
+        },
+        {
+          "name": "WEBHCAT_SERVER",
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "webhcat-site/templeton.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "webhcat-site/templeton.kerberos.keytab"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "core-site": {
+                "hadoop.proxyuser.HTTP.hosts": "${clusterHostInfo/webhcat_server_host|append(core-site/hadoop.proxyuser.HTTP.hosts, \\\\,, true)}"
+              }
+            },
+            {
+              "webhcat-site": {
+                "templeton.kerberos.secret": "secret",
+                "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=${clusterHostInfo/hive_metastore_host|each(thrift://%s:9083, \\\\,, \\s*\\,\\s*)},hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
index b1501b8..60d50eb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
@@ -117,7 +117,7 @@
                 },
                 "group": {
                   "name": "${cluster-env/user_group}",
-                  "access": "r"
+                  "access": ""
                 },
                 "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
               },


[12/31] ambari git commit: AMBARI-21427. Assigning hosts concurrently to same config group may fail with 'org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist'. (stoader)

Posted by ab...@apache.org.
AMBARI-21427. Assigning hosts concurrently to same config group may fail with 'org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist'. (stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3c9f125c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3c9f125c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3c9f125c

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 3c9f125cc08269558f35a971c321777d331de1ca
Parents: 7f3d3b2
Author: Toader, Sebastian <st...@hortonworks.com>
Authored: Mon Jul 10 13:02:20 2017 +0200
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Mon Jul 10 13:02:45 2017 +0200

----------------------------------------------------------------------
 .../ambari/server/topology/AmbariContext.java   | 28 +++++++++++++++++---
 1 file changed, 24 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3c9f125c/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 9b64edc..dee0e6c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -30,6 +30,7 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
 
 import javax.annotation.Nullable;
 import javax.inject.Inject;
@@ -81,6 +82,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Striped;
 import com.google.inject.Provider;
 
 
@@ -121,6 +123,16 @@ public class AmbariContext {
 
   private final static Logger LOG = LoggerFactory.getLogger(AmbariContext.class);
 
+
+  /**
+   * When config groups are created using Blueprints these are created when
+   * hosts join a hostgroup and are added to the corresponding config group.
+   * Since hosts join in parallel there might be a race condition in creating
+   * the config group a host is to be added to. Thus we need to synchronize
+   * the creation of config groups with the same name.
+   */
+  private Striped<Lock> configGroupCreateLock = Striped.lazyWeakLock(1);
+
   public boolean isClusterKerberosEnabled(long clusterId) {
     Cluster cluster;
     try {
@@ -341,11 +353,17 @@ public class AmbariContext {
   }
 
   public void registerHostWithConfigGroup(final String hostName, final ClusterTopology topology, final String groupName) {
+    String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
+
+    Lock configGroupLock = configGroupCreateLock.get(qualifiedGroupName);
+
     try {
+      configGroupLock.lock();
+
       boolean hostAdded = RetryHelper.executeWithRetry(new Callable<Boolean>() {
         @Override
         public Boolean call() throws Exception {
-          return addHostToExistingConfigGroups(hostName, topology, groupName);
+          return addHostToExistingConfigGroups(hostName, topology, qualifiedGroupName);
         }
       });
       if (!hostAdded) {
@@ -355,6 +373,9 @@ public class AmbariContext {
       LOG.error("Unable to register config group for host: ", e);
       throw new RuntimeException("Unable to register config group for host: " + hostName);
     }
+    finally {
+      configGroupLock.unlock();
+    }
   }
 
   public RequestStatusResponse installHost(String hostName, String clusterName, Collection<String> skipInstallForComponents, Collection<String> dontSkipInstallForComponents, boolean skipFailure) {
@@ -562,7 +583,7 @@ public class AmbariContext {
   /**
    * Add the new host to an existing config group.
    */
-  private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String groupName) {
+  private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String configGroupName) {
     boolean addedHost = false;
     Clusters clusters;
     Cluster cluster;
@@ -576,9 +597,8 @@ public class AmbariContext {
     // I don't know of a method to get config group by name
     //todo: add a method to get config group by name
     Map<Long, ConfigGroup> configGroups = cluster.getConfigGroups();
-    String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
     for (ConfigGroup group : configGroups.values()) {
-      if (group.getName().equals(qualifiedGroupName)) {
+      if (group.getName().equals(configGroupName)) {
         try {
           Host host = clusters.getHost(hostName);
           addedHost = true;
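
For readers following the fix above: Guava's Striped hands out the same Lock for equal keys, so two hosts joining the same blueprint host group serialize on the qualified config group name while unrelated groups proceed in parallel. The sketch below is a minimal, hypothetical illustration of that pattern only; ConfigGroupRegistry, registerHost and the in-memory map are invented for the example, and the real logic lives in AmbariContext as shown in the diff.

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;

import com.google.common.util.concurrent.Striped;

public class ConfigGroupRegistry {

  // Equal keys map to the same Lock instance; unused locks can be collected.
  private final Striped<Lock> configGroupCreateLock = Striped.lazyWeakLock(16);

  // Stand-in for the cluster's config groups, keyed by qualified group name.
  private final Map<String, Set<String>> groups = new ConcurrentHashMap<>();

  public void registerHost(String qualifiedGroupName, String hostName) {
    Lock lock = configGroupCreateLock.get(qualifiedGroupName);
    lock.lock();
    try {
      // Without the lock, two hosts joining at once can both observe "no group
      // yet", both attempt to create it, and the second create fails with
      // ResourceAlreadyExistsException, which is exactly the race fixed above.
      Set<String> hosts = groups.get(qualifiedGroupName);
      if (hosts == null) {
        hosts = new HashSet<>();            // stands in for the create-group call
        groups.put(qualifiedGroupName, hosts);
      }
      hosts.add(hostName);
    } finally {
      lock.unlock();
    }
  }
}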


[23/31] ambari git commit: AMBARI-21447 Log Feeder should support logs without date (time only) (mgergely)

Posted by ab...@apache.org.
AMBARI-21447 Log Feeder should support logs without date (time only) (mgergely)

Change-Id: I853447134873b10fdd3fd604fd84630a9caf9d03


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9f788c38
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9f788c38
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9f788c38

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 9f788c386667bfeb82fff7c35287a5fdb175c349
Parents: 31b9d77
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Wed Jul 12 16:55:48 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Wed Jul 12 16:55:48 2017 +0200

----------------------------------------------------------------------
 .../ambari/logfeeder/mapper/MapperDate.java     | 42 +++++++++++++-------
 1 file changed, 28 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9f788c38/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
index 305688b..e099161 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
@@ -19,6 +19,7 @@
 
 package org.apache.ambari.logfeeder.mapper;
 
+import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.Calendar;
 import java.util.Date;
@@ -79,20 +80,7 @@ public class MapperDate extends Mapper {
           jsonObj.put(LogFeederConstants.IN_MEMORY_TIMESTAMP, ((Date) value).getTime());
         } else if (targetDateFormatter != null) {
           if (srcDateFormatter != null) {
-            Date srcDate = srcDateFormatter.parse(value.toString());
-            //set year in src_date when src_date does not have year component
-            if (!srcDateFormatter.toPattern().contains("yy")) {
-              Calendar currentCalendar = Calendar.getInstance();
-              Calendar logDateCalendar = Calendar.getInstance();
-              logDateCalendar.setTimeInMillis(srcDate.getTime());
-              if (logDateCalendar.get(Calendar.MONTH) > currentCalendar.get(Calendar.MONTH)) {
-                // set previous year as a log year  when log month is grater than current month
-                srcDate = DateUtils.setYears(srcDate, currentCalendar.get(Calendar.YEAR) - 1);
-              } else {
-                // set current year as a log year
-                srcDate = DateUtils.setYears(srcDate, currentCalendar.get(Calendar.YEAR));
-              }
-            }
+            Date srcDate = getSourceDate(value);
             value = targetDateFormatter.format(srcDate);
             jsonObj.put(LogFeederConstants.IN_MEMORY_TIMESTAMP, srcDate.getTime());
           } else {
@@ -111,4 +99,30 @@ public class MapperDate extends Mapper {
     }
     return value;
   }
+
+  private Date getSourceDate(Object value) throws ParseException {
+    Date srcDate = srcDateFormatter.parse(value.toString());
+    
+    Calendar currentCalendar = Calendar.getInstance();
+    
+    if (!srcDateFormatter.toPattern().contains("dd")) {
+      //set year/month/date in src_date when src_date does not have date component
+      srcDate = DateUtils.setYears(srcDate, currentCalendar.get(Calendar.YEAR));
+      srcDate = DateUtils.setMonths(srcDate, currentCalendar.get(Calendar.MONTH));
+      srcDate = DateUtils.setDays(srcDate, currentCalendar.get(Calendar.DAY_OF_MONTH));
+      // if with the current date the time stamp is after the current one, it must be previous day
+      if (srcDate.getTime() > currentCalendar.getTimeInMillis()) {
+        srcDate = DateUtils.addDays(srcDate, -1);
+      }      
+    } else if (!srcDateFormatter.toPattern().contains("yy")) {
+      //set year in src_date when src_date does not have year component
+      srcDate = DateUtils.setYears(srcDate, currentCalendar.get(Calendar.YEAR));
+      // if with the current year the time stamp is after the current one, it must be previous year
+      if (srcDate.getTime() > currentCalendar.getTimeInMillis()) {
+        srcDate = DateUtils.addYears(srcDate, -1);
+      }
+    }
+    
+    return srcDate;
+  }
 }
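
As a side note on the new getSourceDate() behaviour: when the source pattern has no date component, the missing year, month and day are borrowed from the current time, and when it has no year, only the year is borrowed; in both cases a result that lands in the future is rolled back by one day or one year. The snippet below restates that inference with java.time purely for illustration (the class and method names are invented); the actual Log Feeder code keeps using SimpleDateFormat and commons-lang DateUtils as in the diff.

import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.MonthDay;
import java.time.format.DateTimeFormatter;

public class LogTimestampInference {

  // Source pattern carries only a time of day, e.g. "HH:mm:ss,SSS".
  static LocalDateTime fromTimeOnly(String value, DateTimeFormatter timeFormat,
                                    LocalDateTime now) {
    LocalDateTime candidate = LocalTime.parse(value, timeFormat).atDate(now.toLocalDate());
    // A time later than "now" on today's date must belong to yesterday.
    return candidate.isAfter(now) ? candidate.minusDays(1) : candidate;
  }

  // Source pattern carries month and day but no year, e.g. "MMM dd HH:mm:ss".
  static LocalDateTime fromYearlessDate(MonthDay monthDay, LocalTime time,
                                        LocalDateTime now) {
    LocalDateTime candidate = monthDay.atYear(now.getYear()).atTime(time);
    // A month/day ahead of today's must come from the previous year.
    return candidate.isAfter(now) ? candidate.minusYears(1) : candidate;
  }
}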


[28/31] ambari git commit: AMBARI-21426. Apply ZEPPELIN-2698 related changes in Ambari (prabhjyotsingh via Venkata Sairam)

Posted by ab...@apache.org.
AMBARI-21426. Apply ZEPPELIN-2698 related changes in Ambari (prabhjyotsingh via Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/63186bf3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/63186bf3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/63186bf3

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 63186bf3eb1bf0501e0c2450f85467a0bc6adf12
Parents: 853a5d4
Author: Venkata Sairam <ve...@gmail.com>
Authored: Fri Jul 14 12:35:26 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Fri Jul 14 12:35:52 2017 +0530

----------------------------------------------------------------------
 .../ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml              | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/63186bf3/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
index 4032b2c..80ac2bb 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
@@ -95,7 +95,7 @@ export ZEPPELIN_INTP_CLASSPATH_OVERRIDES="{{external_dependency_conf}}"
 ## Kerberos ticket refresh setting
 ##
 export KINIT_FAIL_THRESHOLD=5
-export LAUNCH_KERBEROS_REFRESH_INTERVAL=1d
+export KERBEROS_REFRESH_INTERVAL=1d
 
 ## Use provided spark installation ##
 ## defining SPARK_HOME makes Zeppelin run spark interpreter process using spark-submit


[04/31] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
index 4e7d857..bcadd03 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
@@ -1,873 +1,873 @@
 {
     "localComponents": [
-        "SECONDARY_NAMENODE", 
-        "HDFS_CLIENT", 
-        "DATANODE", 
-        "NAMENODE", 
-        "RANGER_ADMIN", 
-        "RANGER_TAGSYNC", 
-        "RANGER_USERSYNC", 
-        "ZOOKEEPER_SERVER", 
-        "ZOOKEEPER_CLIENT", 
-        "KERBEROS_CLIENT", 
+        "SECONDARY_NAMENODE",
+        "HDFS_CLIENT",
+        "DATANODE",
+        "NAMENODE",
+        "RANGER_ADMIN",
+        "RANGER_TAGSYNC",
+        "RANGER_USERSYNC",
+        "ZOOKEEPER_SERVER",
+        "ZOOKEEPER_CLIENT",
+        "KERBEROS_CLIENT",
         "RANGER_KMS_SERVER"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-kms-site": {}, 
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "kms-log4j": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "ranger-ugsync-site": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
-        "ranger-kms-security": {}, 
-        "kerberos-env": {}, 
-        "kms-properties": {}, 
-        "admin-properties": {}, 
-        "ranger-kms-policymgr-ssl": {}, 
+        "ranger-kms-site": {},
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "kms-log4j": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "ranger-ugsync-site": {},
+        "ranger-hdfs-plugin-properties": {},
+        "ranger-kms-security": {},
+        "kerberos-env": {},
+        "kms-properties": {},
+        "admin-properties": {},
+        "ranger-kms-policymgr-ssl": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
-        "ranger-tagsync-site": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-kms-audit": {}, 
-        "hadoop-policy": {}, 
-        "hdfs-log4j": {}, 
-        "usersync-log4j": {}, 
-        "krb5-conf": {}, 
-        "kms-site": {}, 
+        },
+        "ranger-tagsync-site": {},
+        "tagsync-log4j": {},
+        "ranger-kms-audit": {},
+        "hadoop-policy": {},
+        "hdfs-log4j": {},
+        "usersync-log4j": {},
+        "krb5-conf": {},
+        "kms-site": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "zookeeper-env": {}, 
-        "admin-log4j": {}, 
-        "zoo.cfg": {}, 
-        "ranger-hdfs-security": {}, 
-        "usersync-properties": {}, 
-        "kms-env": {}, 
-        "dbks-site": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "zookeeper-env": {},
+        "admin-log4j": {},
+        "zoo.cfg": {},
+        "ranger-hdfs-security": {},
+        "usersync-properties": {},
+        "kms-env": {},
+        "dbks-site": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "43-0", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER_KMS", 
-    "role": "RANGER_KMS_SERVER", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 43, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "43-0",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER_KMS",
+    "role": "RANGER_KMS_SERVER",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 43,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "c1", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 200, 
-    "roleParams": {}, 
+    },
+    "clusterName": "c1",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 200,
+    "roleParams": {},
     "configurationTags": {
         "ranger-kms-site": {
             "tag": "version1467026737262"
-        }, 
+        },
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1467016680635"
-        }, 
+        },
         "kms-log4j": {
             "tag": "version1467026737262"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1467016680511"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1467016680537"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ranger-kms-security": {
             "tag": "version1467026737262"
-        }, 
+        },
         "kerberos-env": {
             "tag": "version1467016537243"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-kms-policymgr-ssl": {
             "tag": "version1467026737262"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1467016680401"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1467016680586"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "ranger-kms-audit": {
             "tag": "version1467026737262"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "krb5-conf": {
             "tag": "version1467016537243"
-        }, 
+        },
         "kms-site": {
             "tag": "version1467026751210"
-        }, 
+        },
         "core-site": {
             "tag": "version1467026751256"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1467016680446"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1467016680492"
-        }, 
+        },
         "kms-properties": {
             "tag": "version1467026737262"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "kms-env": {
             "tag": "version1467026737262"
-        }, 
+        },
         "dbks-site": {
             "tag": "version1467026751234"
-        }, 
+        },
         "cluster-env": {
             "tag": "version1467016680567"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
-        "package_version": "2_5_0_0_*", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
+        "package_version": "2_5_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
-        "current_version": "2.5.0.0-801", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
-        "agent_stack_retry_count": "5", 
-        "stack_version": "2.5", 
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
-        "java_home": "/usr/jdk64/jdk1.7.0_45", 
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
-        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
-        "package_list": "[{\"name\":\"ranger_${stack_version}-kms\",\"condition\":\"\",\"skipUpgrade\":false}]", 
-        "db_name": "ambari", 
-        "group_list": "[\"kms\",\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"kms\",\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
+        "current_version": "2.5.0.0-801",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+        "agent_stack_retry_count": "5",
+        "stack_version": "2.5",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+        "package_list": "[{\"name\":\"ranger_${stack_version}-kms\",\"condition\":\"\",\"skipUpgrade\":false}]",
+        "db_name": "ambari",
+        "group_list": "[\"kms\",\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"kms\",\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER_KMS/0.5.0.2.3/package", 
-        "script": "scripts/kms_server.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "version": "2.5.0.0-801", 
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "service_package_folder": "common-services/RANGER_KMS/0.5.0.2.3/package",
+        "script": "scripts/kms_server.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "version": "2.5.0.0-801",
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 0, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 0,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_kms_server_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-kms-site": {
-            "ranger.service.https.port": "9393", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "xa.webapp.dir": "./webapp", 
-            "ranger.service.host": "{{kms_host}}", 
-            "ranger.service.shutdown.port": "7085", 
-            "ranger.contextName": "/kms", 
+            "ranger.service.https.port": "9393",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "xa.webapp.dir": "./webapp",
+            "ranger.service.host": "{{kms_host}}",
+            "ranger.service.shutdown.port": "7085",
+            "ranger.contextName": "/kms",
             "ranger.service.http.port": "{{kms_port}}"
-        }, 
+        },
         "ranger-hdfs-audit": {
             "xasecure.audit.destination.solr.zookeepers": "NONE",
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
             "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
-            "xasecure.audit.destination.hdfs": "true", 
-            "xasecure.audit.destination.solr": "false", 
-            "xasecure.audit.provider.summary.enabled": "false", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.destination.hdfs": "true",
+            "xasecure.audit.destination.solr": "false",
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
-            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}", 
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
+            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
             "ranger.plugins.hdfs.serviceuser": "hdfs",
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
             "ranger.service.https.attrib.clientAuth": "want",
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
             "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
-            "ranger.audit.solr.zookeepers": "NONE", 
-            "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM", 
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
+            "ranger.audit.solr.zookeepers": "NONE",
+            "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
             "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
-            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "kms-log4j": {
             "content": "\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License. See accompanying LICENSE file.\n#\n\n# If the Java System property 'kms.log.dir' is not defined at KMS start up time\n# Setup sets its value to '${kms.home}/logs'\n\nlog4j.appender.kms=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kms.DatePattern='.'yyyy-MM-dd\nlog4j.appender.kms.File=${kms.log.dir}/kms.log\nlog4j.appender.kms.Append=true\nlog4j.appender.kms.layout=org.apache.log4j.PatternLayout\
 nlog4j.appender.kms.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} - %m%n\n\nlog4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kms-audit.DatePattern='.'yyyy-MM-dd\nlog4j.appender.kms-audit.File=${kms.log.dir}/kms-audit.log\nlog4j.appender.kms-audit.Append=true\nlog4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout\nlog4j.appender.kms-audit.layout.ConversionPattern=%d{ISO8601} %m%n\n\nlog4j.logger.kms-audit=INFO, kms-audit\nlog4j.additivity.kms-audit=false\n\nlog4j.rootLogger=ALL, kms\nlog4j.logger.org.apache.hadoop.conf=ERROR\nlog4j.logger.org.apache.hadoop=INFO\nlog4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
-            "atlas.kafka.security.protocol": "SASL_PLAINTEXT", 
-            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}", 
-            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}", 
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule", 
-            "atlas.jaas.KafkaClient.option.serviceName": "kafka", 
-            "atlas.kafka.bootstrap.servers": "localhost:6667", 
-            "atlas.jaas.KafkaClient.option.useKeyTab": "true", 
-            "atlas.jaas.KafkaClient.option.storeKey": "true", 
-            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required", 
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
+            "atlas.kafka.security.protocol": "SASL_PLAINTEXT",
+            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}",
+            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}",
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "atlas.jaas.KafkaClient.option.serviceName": "kafka",
+            "atlas.kafka.bootstrap.servers": "localhost:6667",
+            "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+            "atlas.jaas.KafkaClient.option.storeKey": "true",
+            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
             "atlas.kafka.sasl.kerberos.service.name": "kafka"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "false", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "false", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "false",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "false",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "ranger-kms-security": {
-            "ranger.plugin.kms.policy.pollIntervalMs": "30000", 
-            "ranger.plugin.kms.service.name": "{{repo_name}}", 
-            "ranger.plugin.kms.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
-            "ranger.plugin.kms.policy.rest.ssl.config.file": "/etc/ranger/kms/conf/ranger-policymgr-ssl.xml", 
-            "ranger.plugin.kms.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
+            "ranger.plugin.kms.policy.pollIntervalMs": "30000",
+            "ranger.plugin.kms.service.name": "{{repo_name}}",
+            "ranger.plugin.kms.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+            "ranger.plugin.kms.policy.rest.ssl.config.file": "/etc/ranger/kms/conf/ranger-policymgr-ssl.xml",
+            "ranger.plugin.kms.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
             "ranger.plugin.kms.policy.rest.url": "{{policymgr_mgr_url}}"
-        }, 
+        },
         "kerberos-env": {
-            "kdc_hosts": "c6401.ambari.apache.org", 
-            "manage_auth_to_local": "true", 
-            "install_packages": "true", 
-            "realm": "EXAMPLE.COM", 
-            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
-            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}", 
-            "kdc_create_attributes": "", 
-            "admin_server_host": "c6401.ambari.apache.org", 
-            "group": "ambari-managed-principals", 
-            "password_length": "20", 
-            "ldap_url": "", 
-            "manage_identities": "true", 
-            "password_min_lowercase_letters": "1", 
-            "create_ambari_principal": "true", 
-            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}", 
-            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", 
-            "password_chat_timeout": "5", 
-            "kdc_type": "mit-kdc", 
-            "set_password_expiry": "false", 
-            "password_min_punctuation": "1", 
-            "container_dn": "", 
-            "case_insensitive_username_rules": "false", 
-            "password_min_whitespace": "0", 
-            "password_min_uppercase_letters": "1", 
+            "kdc_hosts": "c6401.ambari.apache.org",
+            "manage_auth_to_local": "true",
+            "install_packages": "true",
+            "realm": "EXAMPLE.COM",
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}",
+            "kdc_create_attributes": "",
+            "admin_server_host": "c6401.ambari.apache.org",
+            "group": "ambari-managed-principals",
+            "password_length": "20",
+            "ldap_url": "",
+            "manage_identities": "true",
+            "password_min_lowercase_letters": "1",
+            "create_ambari_principal": "true",
+            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+            "password_chat_timeout": "5",
+            "kdc_type": "mit-kdc",
+            "set_password_expiry": "false",
+            "password_min_punctuation": "1",
+            "container_dn": "",
+            "case_insensitive_username_rules": "false",
+            "password_min_whitespace": "0",
+            "password_min_uppercase_letters": "1",
             "password_min_digits": "1"
-        }, 
+        },
         "kms-properties": {
-            "REPOSITORY_CONFIG_USERNAME": "keyadmin", 
-            "db_user": "rangerkms01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangerkms01", 
-            "KMS_MASTER_KEY_PASSWD": "StrongPassword01", 
-            "db_root_user": "root", 
-            "db_name": "rangerkms01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
-            "SQL_CONNECTOR_JAR": "{{driver_curl_target}}", 
+            "REPOSITORY_CONFIG_USERNAME": "keyadmin",
+            "db_user": "rangerkms01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangerkms01",
+            "KMS_MASTER_KEY_PASSWD": "StrongPassword01",
+            "db_root_user": "root",
+            "db_name": "rangerkms01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
+            "SQL_CONNECTOR_JAR": "{{driver_curl_target}}",
             "REPOSITORY_CONFIG_PASSWORD": "keyadmin"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
-        }, 
+        },
         "ranger-kms-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/ranger-kms/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:1019", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.datanode.http.address": "0.0.0.0:1022", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+            "dfs.heartbeat.interval": "3",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:1019",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.datanode.http.address": "0.0.0.0:1022",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
-            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
-            "ranger.tagsync.source.atlasrest.username": "", 
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM", 
-            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.source.atlasrest.username": "",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM",
+            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "tagsync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
-        }, 
+        },
         "ranger-kms-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/ranger/kms/audit/solr/spool", 
-            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/ranger/kms/audit/hdfs/spool", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "xasecure.audit.destination.solr": "true", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/ranger/kms/audit/solr/spool",
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/ranger/kms/audit/hdfs/spool",
+            "xasecure.audit.destination.hdfs": "true",
+            "xasecure.audit.destination.solr": "true",
             "xasecure.audit.provider.summary.enabled": "false",
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "krb5-conf": {
-            "domains": "", 
-            "manage_krb5_conf": "true", 
-            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}", 
+            "domains": "",
+            "manage_krb5_conf": "true",
+            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}",
             "conf_dir": "/etc"
-        }, 
+        },
         "kms-site": {
-            "hadoop.kms.proxyuser.ranger.hosts": "*", 
-            "hadoop.kms.authentication.type": "kerberos", 
-            "hadoop.kms.proxyuser.ranger.groups": "*", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.path": "/hadoop-kms/hadoop-auth-signature-secret", 
-            "hadoop.kms.security.authorization.manager": "org.apache.ranger.authorization.kms.authorizer.RangerKmsAuthorizer", 
-            "hadoop.kms.authentication.kerberos.name.rules": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "hadoop.kms.current.key.cache.timeout.ms": "30000", 
-            "hadoop.kms.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "hadoop.kms.audit.aggregation.window.ms": "10000", 
-            "hadoop.kms.proxyuser.ranger.users": "*", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type": "kerberos", 
-            "hadoop.kms.key.provider.uri": "dbks://http@localhost:9292/kms", 
-            "hadoop.security.keystore.JavaKeyStoreProvider.password": "none", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "hadoop.kms.authentication.signer.secret.provider": "random", 
-            "hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string": "#HOSTNAME#:#PORT#,...", 
-            "hadoop.kms.cache.enable": "true", 
-            "hadoop.kms.cache.timeout.ms": "600000", 
+            "hadoop.kms.proxyuser.ranger.hosts": "*",
+            "hadoop.kms.authentication.type": "kerberos",
+            "hadoop.kms.proxyuser.ranger.groups": "*",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.path": "/hadoop-kms/hadoop-auth-signature-secret",
+            "hadoop.kms.security.authorization.manager": "org.apache.ranger.authorization.kms.authorizer.RangerKmsAuthorizer",
+            "hadoop.kms.authentication.kerberos.name.rules": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "hadoop.kms.current.key.cache.timeout.ms": "30000",
+            "hadoop.kms.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "hadoop.kms.audit.aggregation.window.ms": "10000",
+            "hadoop.kms.proxyuser.ranger.users": "*",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type": "kerberos",
+            "hadoop.kms.key.provider.uri": "dbks://http@localhost:9292/kms",
+            "hadoop.security.keystore.JavaKeyStoreProvider.password": "none",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+            "hadoop.kms.authentication.signer.secret.provider": "random",
+            "hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string": "#HOSTNAME#:#PORT#,...",
+            "hadoop.kms.cache.enable": "true",
+            "hadoop.kms.cache.timeout.ms": "600000",
             "hadoop.kms.authentication.kerberos.principal": "*"
-        }, 
+        },
         "core-site": {
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "fs.trash.interval": "360", 
-            "ipc.server.tcpnodelay": "true", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
-            "ipc.client.idlethreshold": "8000", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "hadoop.security.authentication": "kerberos", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.proxyuser.kms.groups": "*", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.HTTP.groups": "users", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms", 
-            "hadoop.security.authorization": "true", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
-            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org", 
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "fs.trash.interval": "360",
+            "ipc.server.tcpnodelay": "true",
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "ipc.client.idlethreshold": "8000",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "hadoop.security.authentication": "kerberos",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.proxyuser.kms.groups": "*",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.HTTP.groups": "users",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms",
+            "hadoop.security.authorization": "true",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangerkms@EXAMPLE.COM)s/.*/keyadmin/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT",
+            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org",
             "ipc.client.connection.maxidletime": "30000"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
             "hdfs_principal_name": "hdfs-cl1@EXAMPLE.COM",
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/ha

<TRUNCATED>

[20/31] ambari git commit: AMBARI-21392. Cleanup relevant Kerberos identities when a service is removed (amagyar)

Posted by ab...@apache.org.
AMBARI-21392. Cleanup relevant Kerberos identities when a service is removed (amagyar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e767aa44
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e767aa44
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e767aa44

Branch: refs/heads/branch-feature-logsearch-ui
Commit: e767aa44d872bab9ac0c416684f80b2b662347e5
Parents: 0b397cd
Author: Attila Magyar <am...@hortonworks.com>
Authored: Tue Jul 11 20:10:12 2017 +0200
Committer: Attila Magyar <am...@hortonworks.com>
Committed: Tue Jul 11 20:10:12 2017 +0200

----------------------------------------------------------------------
 .../controller/DeleteIdentityHandler.java       |  77 ++++++++--
 .../server/controller/KerberosHelper.java       |   2 +-
 .../server/controller/KerberosHelperImpl.java   |   5 +-
 .../utilities/KerberosIdentityCleaner.java      |  88 +++--------
 .../utilities/RemovableIdentities.java          | 145 +++++++++++++++++++
 .../controller/utilities/UsedIdentities.java    | 101 +++++++++++++
 .../ServiceComponentUninstalledEvent.java       |   6 +
 .../server/events/ServiceRemovedEvent.java      |  29 ++--
 .../ambari/server/orm/dao/ClusterDAO.java       |  15 ++
 .../orm/entities/ClusterConfigEntity.java       |   3 +
 .../org/apache/ambari/server/state/Cluster.java |   7 +
 .../apache/ambari/server/state/ServiceImpl.java |  14 +-
 .../server/state/cluster/ClusterImpl.java       |   9 ++
 .../AbstractKerberosDescriptorContainer.java    |  12 ++
 .../kerberos/KerberosComponentDescriptor.java   |  15 --
 .../kerberos/KerberosIdentityDescriptor.java    |  14 +-
 .../utilities/KerberosIdentityCleanerTest.java  | 102 +++++++++++--
 .../server/orm/dao/ServiceConfigDAOTest.java    |  12 ++
 18 files changed, 520 insertions(+), 136 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
index aa098b6..3329e76 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
@@ -17,12 +17,13 @@
  */
 package org.apache.ambari.server.controller;
 
-import static com.google.common.collect.Sets.newHashSet;
+import static java.util.Collections.singleton;
+import static java.util.stream.Collectors.toSet;
 import static org.apache.ambari.server.controller.KerberosHelperImpl.BASE_LOG_DIR;
 
 import java.io.File;
+import java.lang.reflect.Type;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -45,10 +46,15 @@ import org.apache.ambari.server.serveraction.kerberos.KDCType;
 import org.apache.ambari.server.serveraction.kerberos.KerberosOperationHandler;
 import org.apache.ambari.server.serveraction.kerberos.KerberosServerAction;
 import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostServerActionEvent;
 import org.apache.ambari.server.utils.StageUtils;
 
+import com.google.gson.reflect.TypeToken;
+
+
 /**
  * I delete kerberos identities (principals and keytabs) of a given component.
  */
@@ -78,7 +84,7 @@ class DeleteIdentityHandler {
     if (manageIdentities) {
       addPrepareDeleteIdentity(cluster, hostParamsJson, event, commandParameters, stageContainer);
       addDestroyPrincipals(cluster, hostParamsJson, event, commandParameters, stageContainer);
-      addDeleteKeytab(cluster, newHashSet(commandParameters.component.getHostName()), hostParamsJson, commandParameters, stageContainer);
+      addDeleteKeytab(cluster, commandParameters.getAffectedHostNames(), hostParamsJson, commandParameters, stageContainer);
     }
     addFinalize(cluster, hostParamsJson, event, stageContainer, commandParameters);
   }
@@ -172,15 +178,15 @@ class DeleteIdentityHandler {
 
 
   public static class CommandParams {
-    private final Component component;
-    private final List<String> identities;
+    private final List<Component> components;
+    private final Set<String> identities;
     private final String authName;
     private final File dataDirectory;
     private final String defaultRealm;
     private final KDCType kdcType;
 
-    public CommandParams(Component component, List<String> identities, String authName, File dataDirectory, String defaultRealm, KDCType kdcType) {
-      this.component = component;
+    public CommandParams(List<Component> components, Set<String> identities, String authName, File dataDirectory, String defaultRealm, KDCType kdcType) {
+      this.components = components;
       this.identities = identities;
       this.authName = authName;
       this.dataDirectory = dataDirectory;
@@ -194,11 +200,15 @@ class DeleteIdentityHandler {
       commandParameters.put(KerberosServerAction.DEFAULT_REALM, defaultRealm);
       commandParameters.put(KerberosServerAction.KDC_TYPE, kdcType.name());
       commandParameters.put(KerberosServerAction.IDENTITY_FILTER, StageUtils.getGson().toJson(identities));
-      commandParameters.put(KerberosServerAction.COMPONENT_FILTER, StageUtils.getGson().toJson(component));
+      commandParameters.put(KerberosServerAction.COMPONENT_FILTER, StageUtils.getGson().toJson(components));
       commandParameters.put(KerberosServerAction.DATA_DIRECTORY, dataDirectory.getAbsolutePath());
       return commandParameters;
     }
 
+    public Set<String> getAffectedHostNames() {
+      return components.stream().map(Component::getHostName).collect(toSet());
+    }
+
     public String asJson() {
       return StageUtils.getGson().toJson(asMap());
     }
@@ -211,22 +221,57 @@ class DeleteIdentityHandler {
       processServiceComponents(
         getCluster(),
         kerberosDescriptor,
-        Collections.singletonList(getComponentFilter()),
+        componentFilter(),
         getIdentityFilter(),
         dataDirectory(),
-        calculateConfig(kerberosDescriptor),
-        new HashMap<String, Map<String, String>>(),
+        calculateConfig(kerberosDescriptor, serviceNames()),
+        new HashMap<>(),
         false,
-        new HashMap<String, Set<String>>());
+        new HashMap<>());
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", actionLog.getStdOut(), actionLog.getStdErr());
     }
 
-    protected Component getComponentFilter() {
-      return StageUtils.getGson().fromJson(getCommandParameterValue(KerberosServerAction.COMPONENT_FILTER), Component.class);
+    private Set<String> serviceNames() {
+      return componentFilter().stream().map(component -> component.getServiceName()).collect(toSet());
+    }
+
+    private List<Component> componentFilter() {
+      Type jsonType = new TypeToken<List<Component>>() {}.getType();
+      return StageUtils.getGson().fromJson(getCommandParameterValue(KerberosServerAction.COMPONENT_FILTER), jsonType);
+    }
+
+    /**
+     * Cleaning identities is asynchronous, so the service and its configuration may already be deleted at this point.
+     * We're extending the actual config with the properties of the latest deleted configuration of the service.
+     * The service configuration is needed because principal names may contain placeholder variables which are replaced based on the service configuration.
+     */
+    private Map<String, Map<String, String>> calculateConfig(KerberosDescriptor kerberosDescriptor, Set<String> serviceNames) throws AmbariException {
+      Map<String, Map<String, String>> actualConfig = getKerberosHelper().calculateConfigurations(getCluster(), null, kerberosDescriptor.getProperties());
+      extendWithDeletedConfigOfService(actualConfig, serviceNames);
+      return actualConfig;
+    }
+
+    private void extendWithDeletedConfigOfService(Map<String, Map<String, String>> configToBeExtended, Set<String> serviceNames) throws AmbariException {
+      Set<String> deletedConfigTypes = serviceNames.stream()
+        .flatMap(serviceName -> configTypesOfService(serviceName).stream())
+        .collect(toSet());
+      for (Config deletedConfig : getCluster().getLatestConfigsWithTypes(deletedConfigTypes)) {
+        configToBeExtended.put(deletedConfig.getType(), deletedConfig.getProperties());
+      }
     }
 
-    private Map<String, Map<String, String>> calculateConfig(KerberosDescriptor kerberosDescriptor) throws AmbariException {
-      return getKerberosHelper().calculateConfigurations(getCluster(), null, kerberosDescriptor.getProperties());
+    private Set<String> configTypesOfService(String serviceName) {
+      try {
+        StackId stackId = getCluster().getCurrentStackVersion();
+        StackServiceRequest stackServiceRequest = new StackServiceRequest(stackId.getStackName(), stackId.getStackVersion(), serviceName);
+        return AmbariServer.getController().getStackServices(singleton(stackServiceRequest)).stream()
+          .findFirst()
+          .orElseThrow(() -> new IllegalArgumentException("Could not find stack service " + serviceName))
+          .getConfigTypes()
+          .keySet();
+      } catch (AmbariException e) {
+        throw new RuntimeException(e);
+      }
     }
 
     private String dataDirectory() {
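
The componentFilter() change above turns a single serialized Component into a List<Component>, which only round-trips through JSON correctly if the element type is carried by a TypeToken. A minimal, self-contained sketch of that Gson pattern follows; the Component class here is a simplified stand-in for Ambari's org.apache.ambari.server.serveraction.kerberos.Component, and all values are illustrative.

import java.lang.reflect.Type;
import java.util.Arrays;
import java.util.List;

import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;

public class ComponentFilterRoundTrip {

  // Simplified stand-in for org.apache.ambari.server.serveraction.kerberos.Component.
  public static class Component {
    private final String hostName;
    private final String serviceName;
    private final String serviceComponentName;

    public Component(String hostName, String serviceName, String serviceComponentName) {
      this.hostName = hostName;
      this.serviceName = serviceName;
      this.serviceComponentName = serviceComponentName;
    }

    @Override
    public String toString() {
      return serviceName + "/" + serviceComponentName + "@" + hostName;
    }
  }

  public static void main(String[] args) {
    Gson gson = new Gson();
    List<Component> components = Arrays.asList(
        new Component("c6401", "HDFS", "NAMENODE"),
        new Component("c6401", "HDFS", "DATANODE"));

    // Serialize the whole list, as CommandParams.asMap() now does for COMPONENT_FILTER.
    String json = gson.toJson(components);

    // A TypeToken preserves the List<Component> element type across deserialization,
    // mirroring the jsonType used in componentFilter() above.
    Type jsonType = new TypeToken<List<Component>>() {}.getType();
    List<Component> parsed = gson.fromJson(json, jsonType);
    parsed.forEach(System.out::println);
  }
}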

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
index cc0c048..3819863 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
@@ -233,7 +233,7 @@ public interface KerberosHelper {
                                          RequestStageContainer requestStageContainer, Boolean manageIdentities)
       throws AmbariException, KerberosOperationException;
 
-  void deleteIdentity(Cluster cluster, Component component, List<String> identities) throws AmbariException, KerberosOperationException;
+  void deleteIdentities(Cluster cluster, List<Component> components, Set<String> identities) throws AmbariException, KerberosOperationException;
 
   /**
    * Updates the relevant configurations for the components specified in the service filter.

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index b30f8f6..e5b7afd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -301,17 +301,18 @@ public class KerberosHelperImpl implements KerberosHelper {
    * Deletes the kerberos identities of the given component, even if the component is already deleted.
    */
   @Override
-  public void deleteIdentity(Cluster cluster, Component component, List<String> identities) throws AmbariException, KerberosOperationException {
+  public void deleteIdentities(Cluster cluster, List<Component> components, Set<String> identities) throws AmbariException, KerberosOperationException {
     if (identities.isEmpty()) {
       return;
     }
+    LOG.info("Deleting identities: {}", identities);
     KerberosDetails kerberosDetails = getKerberosDetails(cluster, null);
     validateKDCCredentials(kerberosDetails, cluster);
     File dataDirectory = createTemporaryDirectory();
     RoleCommandOrder roleCommandOrder = ambariManagementController.getRoleCommandOrder(cluster);
     DeleteIdentityHandler handler = new DeleteIdentityHandler(customCommandExecutionHelper, configuration.getDefaultServerTaskTimeout(), stageFactory, ambariManagementController);
     DeleteIdentityHandler.CommandParams commandParameters = new DeleteIdentityHandler.CommandParams(
-      component,
+      components,
       identities,
       ambariManagementController.getAuthName(),
       dataDirectory,

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
index 0a8462f..7ec4a6e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
@@ -17,26 +17,12 @@
  */
 package org.apache.ambari.server.controller.utilities;
 
-import static org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptor.nullToEmpty;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.events.ServiceComponentUninstalledEvent;
+import org.apache.ambari.server.events.ServiceRemovedEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
-import org.apache.ambari.server.serveraction.kerberos.Component;
 import org.apache.ambari.server.serveraction.kerberos.KerberosMissingAdminCredentialsException;
-import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -69,67 +55,29 @@ public class KerberosIdentityCleaner {
   @Subscribe
   public void componentRemoved(ServiceComponentUninstalledEvent event) throws KerberosMissingAdminCredentialsException {
     try {
-      Cluster cluster = clusters.getCluster(event.getClusterId());
-      if (cluster.getSecurityType() != SecurityType.KERBEROS) {
-        return;
-      }
-      KerberosComponentDescriptor descriptor = componentDescriptor(cluster, event.getServiceName(), event.getComponentName());
-      if (descriptor == null) {
-        LOG.info("No kerberos descriptor for {}", event);
-        return;
-      }
-      List<String> identitiesToRemove = identityNames(skipSharedIdentities(descriptor.getIdentitiesSkipReferences(), cluster, event));
-      LOG.info("Deleting identities {} after an event {}",  identitiesToRemove, event);
-      kerberosHelper.deleteIdentity(cluster, new Component(event.getHostName(), event.getServiceName(), event.getComponentName()), identitiesToRemove);
+      LOG.info("Removing identities after {}", event);
+      RemovableIdentities
+        .ofComponent(clusters.getCluster(event.getClusterId()), event, kerberosHelper)
+        .remove(kerberosHelper);
     } catch (Exception e) {
       LOG.error("Error while deleting kerberos identity after an event: " + event, e);
     }
   }
 
-  private KerberosComponentDescriptor componentDescriptor(Cluster cluster, String serviceName, String componentName) throws AmbariException {
-    KerberosServiceDescriptor serviceDescriptor = kerberosHelper.getKerberosDescriptor(cluster).getService(serviceName);
-    return serviceDescriptor == null ? null : serviceDescriptor.getComponent(componentName);
-  }
-
-  private List<String> identityNames(List<KerberosIdentityDescriptor> identities) {
-    List<String> result = new ArrayList<>();
-    for (KerberosIdentityDescriptor each : identities) { result.add(each.getName()); }
-    return result;
-  }
-
-  private List<KerberosIdentityDescriptor> skipSharedIdentities(List<KerberosIdentityDescriptor> candidates, Cluster cluster, ServiceComponentUninstalledEvent event) throws AmbariException {
-    List<KerberosIdentityDescriptor> activeIdentities = activeIdentities(cluster, kerberosHelper.getKerberosDescriptor(cluster), event);
-    List<KerberosIdentityDescriptor> result = new ArrayList<>();
-    for (KerberosIdentityDescriptor candidate : candidates) {
-      if (!candidate.isShared(activeIdentities)) {
-        result.add(candidate);
-      } else {
-        LOG.debug("Skip removing shared identity: {}", candidate.getName());
-      }
-    }
-    return result;
-  }
-
-  private List<KerberosIdentityDescriptor> activeIdentities(Cluster cluster, KerberosDescriptor root, ServiceComponentUninstalledEvent event) {
-    List<KerberosIdentityDescriptor> result = new ArrayList<>();
-    result.addAll(nullToEmpty(root.getIdentities()));
-    for (Map.Entry<String, Service> serviceEntry : cluster.getServices().entrySet()) {
-      KerberosServiceDescriptor serviceDescriptor = root.getService(serviceEntry.getKey());
-      if (serviceDescriptor == null) {
-        continue;
-      }
-      result.addAll(nullToEmpty(serviceDescriptor.getIdentities()));
-      for (String componentName : serviceEntry.getValue().getServiceComponents().keySet()) {
-        if (!sameComponent(event, componentName, serviceEntry.getKey())) {
-          result.addAll(serviceDescriptor.getComponentIdentities(componentName));
-        }
-      }
+  /**
+   * Removes kerberos identities (principals and keytabs) after a service was uninstalled.
+   * Keeps the identity if either the principal or the keytab is used by another service.
+   */
+  @Subscribe
+  public void serviceRemoved(ServiceRemovedEvent event) {
+    try {
+      LOG.info("Removing identities after {}", event);
+      RemovableIdentities
+        .ofService(clusters.getCluster(event.getClusterId()), event, kerberosHelper)
+        .remove(kerberosHelper);
+    } catch (Exception e) {
+      LOG.error("Error while deleting kerberos identity after an event: " + event, e);
     }
-    return result;
-  }
-
-  private boolean sameComponent(ServiceComponentUninstalledEvent event, String componentName, String serviceName) {
-    return event.getServiceName().equals(serviceName) && event.getComponentName().equals(componentName);
   }
 }
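
Both handlers above are plain @Subscribe methods; AmbariEventPublisher dispatches to them in the Guava EventBus style, so the cleaner reacts to ServiceComponentUninstalledEvent and ServiceRemovedEvent without being called explicitly. A self-contained sketch of that subscribe/post flow, with illustrative event and listener classes rather than the Ambari ones:

import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;

public class EventBusSketch {

  // Illustrative event, standing in for ServiceRemovedEvent.
  public static class ServiceRemoved {
    final String serviceName;
    ServiceRemoved(String serviceName) { this.serviceName = serviceName; }
  }

  // Illustrative listener, standing in for KerberosIdentityCleaner.
  public static class IdentityCleaner {
    @Subscribe
    public void serviceRemoved(ServiceRemoved event) {
      // In the real cleaner this is where RemovableIdentities.ofService(...).remove(...) runs.
      System.out.println("Cleaning identities after removal of " + event.serviceName);
    }
  }

  public static void main(String[] args) {
    EventBus bus = new EventBus();
    bus.register(new IdentityCleaner());  // register() scans the object for @Subscribe methods
    bus.post(new ServiceRemoved("HDFS")); // post() invokes every subscriber whose parameter type matches
  }
}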
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
new file mode 100644
index 0000000..d4bb501
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.utilities;
+
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toSet;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.controller.utilities.UsedIdentities.ComponentExclude;
+import org.apache.ambari.server.controller.utilities.UsedIdentities.ServiceExclude;
+import org.apache.ambari.server.events.ServiceComponentUninstalledEvent;
+import org.apache.ambari.server.events.ServiceRemovedEvent;
+import org.apache.ambari.server.serveraction.kerberos.Component;
+import org.apache.ambari.server.serveraction.kerberos.KerberosOperationException;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
+
+/**
+ * I represent a group of kerberos identities which are to be deleted after a service or a component was removed.
+ * My instances provide methods for removing the candidates, excluding those that are still used by other components or services.
+ */
+public class RemovableIdentities {
+  private final List<KerberosIdentityDescriptor> candidateIdentities;
+  private final UsedIdentities usedIdentities;
+  private final Cluster cluster;
+  private final List<Component> components;
+
+  /**
+   * Populate the identities with the identities of the removed service and its components
+   */
+  public static RemovableIdentities ofService(Cluster cluster, ServiceRemovedEvent event, KerberosHelper kerberosHelper) throws AmbariException {
+    if (cluster.getSecurityType() != SecurityType.KERBEROS) {
+      return RemovableIdentities.none();
+    }
+    KerberosServiceDescriptor serviceDescriptor = kerberosHelper.getKerberosDescriptor(cluster).getService(event.getServiceName());
+    if (serviceDescriptor == null) {
+      return RemovableIdentities.none();
+    }
+    UsedIdentities usedIdentities = UsedIdentities.populate(cluster, excludeService(event.getServiceName()), ComponentExclude.NONE, kerberosHelper);
+    return new RemovableIdentities(
+      serviceDescriptor.getIdentitiesSkipReferences(),
+      usedIdentities,
+      cluster,
+      event.getComponents());
+  }
+
+  /**
+   * Populate the identities with the identities of the removed component
+   */
+  public static RemovableIdentities ofComponent(Cluster cluster, ServiceComponentUninstalledEvent event, KerberosHelper kerberosHelper) throws AmbariException {
+    if (cluster.getSecurityType() != SecurityType.KERBEROS) {
+      return RemovableIdentities.none();
+    }
+    KerberosServiceDescriptor serviceDescriptor = kerberosHelper.getKerberosDescriptor(cluster).getService(event.getServiceName());
+    if (serviceDescriptor == null) {
+      return RemovableIdentities.none();
+    }
+    UsedIdentities usedIdentities = UsedIdentities.populate(
+      cluster,
+      ServiceExclude.NONE,
+      excludeComponent(event.getServiceName(), event.getComponentName(), event.getHostName()),
+      kerberosHelper);
+    return new RemovableIdentities(
+      componentIdentities(singletonList(event.getComponentName()), serviceDescriptor),
+      usedIdentities,
+      cluster,
+      singletonList(event.getComponent()));
+  }
+
+  /**
+   * Populates the identities with an empty list
+   */
+  public static RemovableIdentities none() throws AmbariException {
+    return new RemovableIdentities(emptyList(), UsedIdentities.none(), null, null);
+  }
+
+  private static ServiceExclude excludeService(String excludedServiceName) {
+    return serviceName -> excludedServiceName.equals(serviceName);
+  }
+
+  private static ComponentExclude excludeComponent(String excludedServiceName, String excludedComponentName, String excludedHostName) {
+    return (serviceName, componentName, hosts) -> excludedServiceName.equals(serviceName)
+      && excludedComponentName.equals(componentName)
+      && hostNames(hosts).equals(singletonList(excludedHostName));
+  }
+
+  private static List<String> hostNames(Collection<ServiceComponentHost> hosts) {
+    return hosts.stream().map(ServiceComponentHost::getHostName).collect(toList());
+  }
+
+  private static List<KerberosIdentityDescriptor> componentIdentities(List<String> componentNames, KerberosServiceDescriptor serviceDescriptor) throws AmbariException {
+    return componentNames.stream()
+      .map(componentName -> serviceDescriptor.getComponent(componentName))
+      .filter(Objects::nonNull)
+      .flatMap(componentDescriptor -> componentDescriptor.getIdentitiesSkipReferences().stream())
+      .collect(toList());
+  }
+
+  private RemovableIdentities(List<KerberosIdentityDescriptor> candidateIdentities, UsedIdentities usedIdentities, Cluster cluster, List<Component> components) {
+    this.candidateIdentities = candidateIdentities;
+    this.usedIdentities = usedIdentities;
+    this.cluster = cluster;
+    this.components = components;
+  }
+
+  /**
+   * Remove all identities which are not used by other services or components
+   */
+  public void remove(KerberosHelper kerberosHelper) throws AmbariException, KerberosOperationException {
+    Set<String> identitiesToRemove = skipUsed().stream().map(KerberosIdentityDescriptor::getName).collect(toSet());
+    if (!identitiesToRemove.isEmpty()) {
+      kerberosHelper.deleteIdentities(cluster, components, identitiesToRemove);
+    }
+  }
+
+  private List<KerberosIdentityDescriptor> skipUsed() throws AmbariException {
+    return candidateIdentities.stream().filter(each -> !usedIdentities.contains(each)).collect(toList());
+  }
+}
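
remove() above is essentially a set difference: the candidate identities of the removed service or component, minus anything an installed service still shares by principal or keytab. A simplified, self-contained model of that filter; Identity stands in for KerberosIdentityDescriptor, and the names and values are illustrative, loosely following the test descriptor later in this commit.

import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

public class RemovableIdentitiesModel {

  // Simplified stand-in for KerberosIdentityDescriptor: a named identity with a principal and keytab.
  public static class Identity {
    final String name;
    final String principal;
    final String keytab;

    Identity(String name, String principal, String keytab) {
      this.name = name;
      this.principal = principal;
      this.keytab = keytab;
    }

    // Mirrors KerberosIdentityDescriptor.isShared: same principal or same keytab file.
    boolean isShared(Identity other) {
      return Objects.equals(principal, other.principal) || Objects.equals(keytab, other.keytab);
    }
  }

  public static void main(String[] args) {
    // Candidates owned by the removed service/component.
    List<Identity> candidates = Arrays.asList(
        new Identity("oozie_server1", "oozie1/_HOST@EXAMPLE.COM", "/etc/security/keytabs/oozie1.keytab"),
        new Identity("oozie_server2", "oozie/_HOST@EXAMPLE.COM", "/etc/security/keytabs/oozie2.keytab"));

    // Identities still used by the remaining services/components (UsedIdentities).
    List<Identity> used = Arrays.asList(
        new Identity("shared", "oozie/_HOST@EXAMPLE.COM", "/etc/security/keytabs/shared.keytab"));

    // skipUsed(): drop every candidate that some used identity shares, then collect the names to delete.
    Set<String> toRemove = candidates.stream()
        .filter(candidate -> used.stream().noneMatch(candidate::isShared))
        .map(identity -> identity.name)
        .collect(Collectors.toSet());

    System.out.println(toRemove); // [oozie_server1] -- oozie_server2 stays because its principal is shared
  }
}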

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/UsedIdentities.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/UsedIdentities.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/UsedIdentities.java
new file mode 100644
index 0000000..46f5642
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/UsedIdentities.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.utilities;
+
+import static java.util.Collections.emptyList;
+import static java.util.stream.Collectors.toList;
+import static org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptor.nullToEmpty;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
+
+/**
+ * I represent a group of identities that are still used by any non-excluded component or service
+ */
+public class UsedIdentities {
+  private final List<KerberosIdentityDescriptor> used;
+
+  public static UsedIdentities none() throws AmbariException {
+    return new UsedIdentities(emptyList());
+  }
+
+  /**
+   * Get all identities of the installed services and components, skipping any excluded service or component.
+   */
+  public static UsedIdentities populate(Cluster cluster, ServiceExclude serviceExclude, ComponentExclude componentExclude, KerberosHelper kerberosHelper) throws AmbariException {
+    List<KerberosIdentityDescriptor> result = new ArrayList<>();
+    KerberosDescriptor root = kerberosHelper.getKerberosDescriptor(cluster);
+    result.addAll(nullToEmpty(root.getIdentities()));
+    for (Service service : cluster.getServices().values()) {
+      if (serviceExclude.shouldExclude(service.getName())) {
+        continue;
+      }
+      KerberosServiceDescriptor serviceDescriptor = root.getService(service.getName());
+      if (serviceDescriptor != null) {
+        result.addAll(nullToEmpty(serviceDescriptor.getIdentities()));
+        result.addAll(nullToEmpty(componentIdentities(serviceDescriptor, service, componentExclude)));
+      }
+    }
+    return new UsedIdentities(result);
+  }
+
+  private static List<KerberosIdentityDescriptor> componentIdentities(KerberosServiceDescriptor serviceDescriptor, Service service, ComponentExclude componentExclude) {
+    return service.getServiceComponents().values()
+      .stream()
+      .filter(component -> !isComponentExcluded(service, componentExclude, component))
+      .flatMap(component -> serviceDescriptor.getComponentIdentities(component.getName()).stream())
+      .collect(toList());
+  }
+
+  private static boolean isComponentExcluded(Service service, ComponentExclude componentExclude, ServiceComponent component) {
+    return component.getServiceComponentHosts().isEmpty()
+      || componentExclude.shouldExclude(service.getName(), component.getName(), component.getServiceComponentHosts().values());
+  }
+
+  private UsedIdentities(List<KerberosIdentityDescriptor> used) {
+    this.used = used;
+  }
+
+  /**
+   * @return true if there is an identity in the used list with the same keytab or principal name as the given identity
+   */
+  public boolean contains(KerberosIdentityDescriptor identity) {
+    return used.stream().anyMatch(each -> identity.isShared(each));
+  }
+
+  public interface ServiceExclude {
+    boolean shouldExclude(String serviceName);
+    ServiceExclude NONE = serviceName -> false; // default implementation, exclude nothing
+  }
+
+  public interface ComponentExclude {
+    boolean shouldExclude(String serviceName, String componentName, Collection<ServiceComponentHost> hosts);
+    ComponentExclude NONE = (serviceName, componentName, hosts) -> false; // default implementation, exclude nothing
+  }
+}
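
The ServiceExclude and ComponentExclude callbacks let one populate() walk serve both removal paths: RemovableIdentities.ofService excludes the removed service, while ofComponent passes ServiceExclude.NONE and excludes only the removed component on its host. A short, self-contained sketch of that functional-interface-plus-NONE-constant pattern (names and values are illustrative):

public class ExcludeSketch {

  // Same shape as UsedIdentities.ServiceExclude: one predicate method plus a do-nothing default.
  public interface ServiceExclude {
    boolean shouldExclude(String serviceName);
    ServiceExclude NONE = serviceName -> false; // exclude nothing
  }

  public static void main(String[] args) {
    // ofService(...) builds an exclude for just the service being removed ...
    ServiceExclude excludeHdfs = "HDFS"::equals;

    // ... so the removed service's identities drop out of the "used" set and become removable,
    // while every other service keeps protecting the identities it still uses.
    System.out.println(excludeHdfs.shouldExclude("HDFS"));         // true
    System.out.println(excludeHdfs.shouldExclude("OOZIE"));        // false
    System.out.println(ServiceExclude.NONE.shouldExclude("HDFS")); // false
  }
}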

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java
index 5b55339..8acc401 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java
@@ -17,6 +17,8 @@
  */
 package org.apache.ambari.server.events;
 
+import org.apache.ambari.server.serveraction.kerberos.Component;
+
 /**
  * The {@link ServiceComponentUninstalledEvent} class is fired when a service
  * component is successfully uninstalled.
@@ -85,4 +87,8 @@ public class ServiceComponentUninstalledEvent extends ServiceEvent {
     buffer.append("}");
     return buffer.toString();
   }
+
+  public Component getComponent() {
+    return new Component(getHostName(), getServiceName(), getComponentName());
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java
index aca00a8..de96342 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java
@@ -17,23 +17,24 @@
  */
 package org.apache.ambari.server.events;
 
+import static java.util.stream.Collectors.toList;
+
+import java.util.List;
+
+import org.apache.ambari.server.serveraction.kerberos.Component;
+
 /**
  * The {@link ServiceRemovedEvent} class is fired when a service is successfully
  * removed.
  */
 public class ServiceRemovedEvent extends ServiceEvent {
-  /**
-   * Constructor.
-   *
-   * @param clusterId
-   * @param stackName
-   * @param stackVersion
-   * @param serviceName
-   */
+  private final List<Component> components;
+
   public ServiceRemovedEvent(long clusterId, String stackName,
-      String stackVersion, String serviceName) {
+                             String stackVersion, String serviceName, List<Component> components) {
     super(AmbariEventType.SERVICE_REMOVED_SUCCESS, clusterId, stackName,
-        stackVersion, serviceName);
+      stackVersion, serviceName);
+    this.components = components;
   }
 
   /**
@@ -49,4 +50,12 @@ public class ServiceRemovedEvent extends ServiceEvent {
     buffer.append("}");
     return buffer.toString();
   }
+
+  public List<Component> getComponents() {
+    return components;
+  }
+
+  public List<String> getComponentNames() {
+    return components.stream().map(Component::getServiceComponentName).collect(toList());
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
index a23b914..d0f8d0b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
@@ -217,6 +217,21 @@ public class ClusterDAO {
   }
 
   /**
+   * Gets the latest configurations for a given stack with any of the given config types.
+   * This method does not take into account the configuration being enabled.
+   */
+  @RequiresSession
+  public List<ClusterConfigEntity> getLatestConfigurationsWithTypes(long clusterId, StackId stackId, Collection<String> configTypes) {
+    StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
+    return daoUtils.selectList(
+      entityManagerProvider.get()
+      .createNamedQuery("ClusterConfigEntity.findLatestConfigsByStackWithTypes", ClusterConfigEntity.class)
+      .setParameter("clusterId", clusterId)
+      .setParameter("stack", stackEntity)
+      .setParameter("types", configTypes));
+  }
+
+  /**
    * Gets the latest configurations for a given stack for all of the
    * configurations of the specified cluster.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
index 34f3034..3a74367 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
@@ -62,6 +62,9 @@ import org.apache.commons.lang.builder.EqualsBuilder;
         name = "ClusterConfigEntity.findLatestConfigsByStack",
         query = "SELECT clusterConfig FROM ClusterConfigEntity clusterConfig WHERE clusterConfig.clusterId = :clusterId AND clusterConfig.stack = :stack AND clusterConfig.selectedTimestamp = (SELECT MAX(clusterConfig2.selectedTimestamp) FROM ClusterConfigEntity clusterConfig2 WHERE clusterConfig2.clusterId=:clusterId AND clusterConfig2.stack=:stack AND clusterConfig2.type = clusterConfig.type)"),
     @NamedQuery(
+        name = "ClusterConfigEntity.findLatestConfigsByStackWithTypes",
+        query = "SELECT clusterConfig FROM ClusterConfigEntity clusterConfig WHERE clusterConfig.type IN :types AND clusterConfig.clusterId = :clusterId AND clusterConfig.stack = :stack AND clusterConfig.selectedTimestamp = (SELECT MAX(clusterConfig2.selectedTimestamp) FROM ClusterConfigEntity clusterConfig2 WHERE clusterConfig2.clusterId=:clusterId AND clusterConfig2.stack=:stack AND clusterConfig2.type = clusterConfig.type)"),
+    @NamedQuery(
         name = "ClusterConfigEntity.findNotMappedClusterConfigsToService",
         query = "SELECT clusterConfig FROM ClusterConfigEntity clusterConfig WHERE clusterConfig.serviceConfigEntities IS EMPTY AND clusterConfig.type != 'cluster-env'"),
     @NamedQuery(
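
The new findLatestConfigsByStackWithTypes query keeps, for each requested config type, only the row with the greatest selectedTimestamp, regardless of whether that configuration is currently enabled. A self-contained sketch of the same "latest per type" rule expressed over plain objects (ConfigRow and the sample values are illustrative, not Ambari classes):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class LatestConfigPerTypeSketch {

  // Illustrative row: a config type plus the timestamp at which it was last selected.
  public static class ConfigRow {
    final String type;
    final long selectedTimestamp;

    ConfigRow(String type, long selectedTimestamp) {
      this.type = type;
      this.selectedTimestamp = selectedTimestamp;
    }

    @Override
    public String toString() {
      return type + "@" + selectedTimestamp;
    }
  }

  public static void main(String[] args) {
    List<ConfigRow> rows = Arrays.asList(
        new ConfigRow("oozie-site", 10), new ConfigRow("oozie-site", 20),
        new ConfigRow("oozie-env", 5), new ConfigRow("core-site", 7));
    Set<String> wantedTypes = new HashSet<>(Arrays.asList("oozie-site", "oozie-env"));

    // Keep only the requested types, then pick the row with the greatest selectedTimestamp
    // per type -- the rule the correlated MAX() subquery expresses in JPQL.
    Map<String, ConfigRow> latest = rows.stream()
        .filter(row -> wantedTypes.contains(row.type))
        .collect(Collectors.toMap(row -> row.type, row -> row,
            (a, b) -> a.selectedTimestamp >= b.selectedTimestamp ? a : b));

    System.out.println(latest.values()); // latest oozie-site (ts 20) and oozie-env (ts 5)
  }
}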

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index b4f7120..9597ba1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -267,6 +267,13 @@ public interface Cluster {
   Config getConfig(String configType, String versionTag);
 
   /**
+   * Get latest (including inactive ones) configurations with any of the given types.
+   * This method does not take into account the configuration being enabled.
+   * @return the list of configurations with the given types
+   */
+  List<Config> getLatestConfigsWithTypes(Collection<String> types);
+
+  /**
    * Gets the specific config that matches the specified type and version.  This not
    * necessarily a DESIRED configuration that applies to a cluster.
    * @param configType  the config type to find

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index 5084703..74d79c8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -51,6 +51,7 @@ import org.apache.ambari.server.orm.entities.ServiceConfigEntity;
 import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntityPK;
 import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.serveraction.kerberos.Component;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -588,6 +589,7 @@ public class ServiceImpl implements Service {
   @Override
   @Transactional
   public void delete() throws AmbariException {
+    List<Component> components = getComponents(); // XXX temporal coupling, need to call this BEFORE deleteAllComponents
     deleteAllComponents();
     deleteAllServiceConfigs();
 
@@ -601,11 +603,21 @@ public class ServiceImpl implements Service {
     }
 
     ServiceRemovedEvent event = new ServiceRemovedEvent(getClusterId(), stackId.getStackName(),
-        stackId.getStackVersion(), getName());
+        stackId.getStackVersion(), getName(), components);
 
     eventPublisher.publish(event);
   }
 
+  private List<Component> getComponents() {
+    List<Component> result = new ArrayList<>();
+    for (ServiceComponent component : getServiceComponents().values()) {
+      for (ServiceComponentHost host : component.getServiceComponentHosts().values()) {
+        result.add(new Component(host.getHostName(), getName(), component.getName()));
+      }
+    }
+    return result;
+  }
+
   @Transactional
   protected void removeEntities() throws AmbariException {
     serviceDesiredStateDAO.removeByPK(serviceDesiredStateEntityPK);

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 06b6217..c950d67 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -35,6 +35,7 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.locks.ReadWriteLock;
+import java.util.stream.Collectors;
 
 import javax.annotation.Nullable;
 import javax.persistence.EntityManager;
@@ -1125,6 +1126,14 @@ public class ClusterImpl implements Cluster {
   }
 
   @Override
+  public List<Config> getLatestConfigsWithTypes(Collection<String> types) {
+    return clusterDAO.getLatestConfigurationsWithTypes(clusterId, getDesiredStackVersion(), types)
+      .stream()
+      .map(clusterConfigEntity -> configFactory.createExisting(this, clusterConfigEntity))
+      .collect(Collectors.toList());
+  }
+
+  @Override
   public Config getConfigByVersion(String configType, Long configVersion) {
     clusterGlobalLock.readLock().lock();
     try {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
index 0a89c1d..5658133 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.state.kerberos;
 
+import static java.util.stream.Collectors.toList;
+
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -777,6 +779,16 @@ public abstract class AbstractKerberosDescriptorContainer extends AbstractKerber
     return map;
   }
 
+  /**
+   * @return identities which are not references to other identities
+   */
+  public List<KerberosIdentityDescriptor> getIdentitiesSkipReferences() {
+    return nullToEmpty(getIdentities())
+      .stream()
+      .filter(identity -> !identity.getReferencedServiceName().isPresent() && identity.getName() != null && !identity.getName().startsWith("/"))
+      .collect(toList());
+  }
+
   @Override
   public int hashCode() {
     return super.hashCode() +
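
getIdentitiesSkipReferences is promoted to the descriptor container so service-level identities can be filtered the same way as component-level ones: identities whose name starts with "/" (or that carry a referenced service name) point at identities owned elsewhere and must not be deleted with this service. A self-contained illustration of the name-based part of that filter, using names like those in the test descriptor below; the referenced-service check is omitted for brevity.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class SkipReferencesSketch {
  public static void main(String[] args) {
    // Identity names as they appear in a kerberos descriptor: entries starting with "/"
    // are references to identities defined by another service or component.
    List<String> identityNames = Arrays.asList(
        "/HDFS/NAMENODE/hdfs",  // reference -> must be skipped
        "oozie_server1",        // defined here -> candidate for removal
        "oozie_server2");

    List<String> ownIdentities = identityNames.stream()
        .filter(name -> name != null && !name.startsWith("/"))
        .collect(Collectors.toList());

    System.out.println(ownIdentities); // [oozie_server1, oozie_server2]
  }
}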

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
index 41d1f65..768a17e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
@@ -17,9 +17,7 @@
  */
 package org.apache.ambari.server.state.kerberos;
 
-import java.util.ArrayList;
 import java.util.Collection;
-import java.util.List;
 import java.util.Map;
 
 /**
@@ -113,19 +111,6 @@ public class KerberosComponentDescriptor extends AbstractKerberosDescriptorConta
     return null;
   }
 
-  /**
-   * @return identities which are not references to other identities
-   */
-  public List<KerberosIdentityDescriptor> getIdentitiesSkipReferences() {
-    List<KerberosIdentityDescriptor> result = new ArrayList<>();
-    for (KerberosIdentityDescriptor each : nullToEmpty(getIdentities())) {
-      if (!each.getReferencedServiceName().isPresent() && each.getName() != null && !each.getName().startsWith("/")) {
-        result.add(each);
-      }
-    }
-    return result;
-  }
-
   @Override
   public int hashCode() {
     return 35 * super.hashCode();

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
index 2023793..911723b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
@@ -17,10 +17,8 @@
  */
 package org.apache.ambari.server.state.kerberos;
 
-import java.util.List;
 import java.util.Map;
 
-import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.collections.Predicate;
 import org.apache.ambari.server.collections.PredicateUtils;
 
@@ -371,16 +369,12 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
     }
   }
 
+
   /**
-   * @return true if this identity either has the same principal or keytab as any of the given identities.
+   * @return true if the given identity has the same principal or keytab as me
    */
-  public boolean isShared(List<KerberosIdentityDescriptor> identities) throws AmbariException {
-    for (KerberosIdentityDescriptor each : identities) {
-      if (hasSamePrincipal(each) || hasSameKeytab(each)) {
-        return true;
-      }
-    }
-    return false;
+  public boolean isShared(KerberosIdentityDescriptor that) {
+    return hasSamePrincipal(that) || hasSameKeytab(that);
   }
 
   private boolean hasSameKeytab(KerberosIdentityDescriptor that) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
index d22c92e..027f339 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
@@ -18,15 +18,20 @@
 package org.apache.ambari.server.controller.utilities;
 
 import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Sets.newHashSet;
+import static java.util.Collections.singletonList;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.reset;
 
+import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.events.ServiceComponentUninstalledEvent;
+import org.apache.ambari.server.events.ServiceRemovedEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.serveraction.kerberos.Component;
 import org.apache.ambari.server.serveraction.kerberos.KerberosMissingAdminCredentialsException;
@@ -35,6 +40,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 import org.easymock.EasyMockRule;
@@ -47,6 +53,7 @@ import org.junit.Test;
 public class KerberosIdentityCleanerTest extends EasyMockSupport {
   @Rule public EasyMockRule mocks = new EasyMockRule(this);
   private static final String HOST = "c6401";
+  private static final String HOST2 = "c6402";
   private static final String OOZIE = "OOZIE";
   private static final String OOZIE_SERVER = "OOZIE_SERVER";
   private static final String OOZIE_2 = "OOZIE2";
@@ -55,6 +62,9 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
   private static final String RESOURCE_MANAGER_2 = "RESOURCE_MANAGER2";
   private static final String YARN = "YARN";
   private static final String RESOURCE_MANAGER = "RESOURCE_MANAGER";
+  private static final String HDFS = "HDFS";
+  private static final String NAMENODE = "NAMENODE";
+  private static final String DATANODE = "DATANODE";
   private static final long CLUSTER_ID = 1;
   @Mock private KerberosHelper kerberosHelper;
   @Mock private Clusters clusters;
@@ -66,8 +76,8 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
 
   @Test
   public void removesAllKerberosIdentitesOfComponentAfterComponentWasUninstalled() throws Exception {
-    installComponent(OOZIE, OOZIE_SERVER);
-    kerberosHelper.deleteIdentity(cluster, new Component(HOST, OOZIE, OOZIE_SERVER), newArrayList("oozie_server1", "oozie_server2"));
+    installComponent(OOZIE, OOZIE_SERVER, HOST);
+    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("oozie_server1", "oozie_server2"));
     expectLastCall().once();
     replayAll();
     uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
@@ -83,9 +93,9 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
 
   @Test
   public void skipsRemovingIdentityThatIsSharedByPrincipalName() throws Exception {
-    installComponent(OOZIE, OOZIE_SERVER);
-    installComponent(OOZIE_2, OOZIE_SERVER_2);
-    kerberosHelper.deleteIdentity(cluster, new Component(HOST, OOZIE, OOZIE_SERVER), newArrayList("oozie_server1"));
+    installComponent(OOZIE, OOZIE_SERVER, HOST);
+    installComponent(OOZIE_2, OOZIE_SERVER_2, HOST);
+    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("oozie_server1"));
     expectLastCall().once();
     replayAll();
     uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
@@ -94,9 +104,9 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
 
   @Test
   public void skipsRemovingIdentityThatIsSharedByKeyTabFilePath() throws Exception {
-    installComponent(YARN, RESOURCE_MANAGER);
-    installComponent(YARN_2, RESOURCE_MANAGER_2);
-    kerberosHelper.deleteIdentity(cluster, new Component(HOST, YARN, RESOURCE_MANAGER), newArrayList("rm_unique"));
+    installComponent(YARN, RESOURCE_MANAGER, HOST);
+    installComponent(YARN_2, RESOURCE_MANAGER_2, HOST);
+    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, YARN, RESOURCE_MANAGER)), newHashSet("rm_unique"));
     expectLastCall().once();
     replayAll();
     uninstallComponent(YARN, RESOURCE_MANAGER, HOST);
@@ -112,11 +122,43 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
     verifyAll();
   }
 
-  private void installComponent(String serviceName, final String componentName) {
+  @Test
+  public void skipsRemovingIdentityIfComponentIsStillInstalledOnADifferentHost() throws Exception {
+    installComponent(OOZIE, OOZIE_SERVER, HOST, HOST2);
+    replayAll();
+    uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
+    verifyAll();
+  }
+
+  @Test
+  public void removesServiceIdentitiesSkipComponentIdentitiesAfterServiceWasUninstalled() throws Exception {
+    installComponent(OOZIE, OOZIE_SERVER, HOST);
+    kerberosHelper.deleteIdentities(cluster, hdfsComponents(), newHashSet("hdfs-service"));
+    expectLastCall().once();
+    replayAll();
+    uninstallService(HDFS, hdfsComponents());
+    verifyAll();
+  }
+
+  private ArrayList<Component> hdfsComponents() {
+    return newArrayList(new Component(HOST, HDFS, NAMENODE), new Component(HOST, HDFS, DATANODE));
+  }
+
+  private void installComponent(String serviceName, String componentName, String... hostNames) {
     Service service = createMock(serviceName + "_" + componentName, Service.class);
+    ServiceComponent component = createMock(componentName, ServiceComponent.class);
+    expect(component.getName()).andReturn(componentName).anyTimes();
+    Map<String, ServiceComponentHost> hosts = new HashMap<>();
+    expect(component.getServiceComponentHosts()).andReturn(hosts).anyTimes();
+    for (String hostName : hostNames) {
+      ServiceComponentHost host = createMock(hostName, ServiceComponentHost.class);
+      expect(host.getHostName()).andReturn(hostName).anyTimes();
+      hosts.put(hostName, host);
+    }
     installedServices.put(serviceName, service);
+    expect(service.getName()).andReturn(serviceName).anyTimes();
     expect(service.getServiceComponents()).andReturn(new HashMap<String, ServiceComponent>() {{
-      put(componentName, null);
+      put(componentName, component);
     }}).anyTimes();
   }
 
@@ -124,6 +166,10 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
     kerberosIdentityCleaner.componentRemoved(new ServiceComponentUninstalledEvent(CLUSTER_ID, "any", "any", service, component, host, false));
   }
 
+  private void uninstallService(String service, List<Component> components) throws KerberosMissingAdminCredentialsException {
+    kerberosIdentityCleaner.serviceRemoved(new ServiceRemovedEvent(CLUSTER_ID, "any", "any", service, components));
+  }
+
   @Before
   public void setUp() throws Exception {
     kerberosIdentityCleaner = new KerberosIdentityCleaner(new AmbariEventPublisher(), kerberosHelper, clusters);
@@ -139,7 +185,8 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
       "              'name': '/HDFS/NAMENODE/hdfs'" +
       "            }," +
       "            {" +
-      "              'name': 'oozie_server1'" +
+      "              'name': 'oozie_server1'," +
+      "              'principal': { 'value': 'oozie1/_HOST@EXAMPLE.COM' }" +
       "            }," +"" +
       "            {" +
       "              'name': 'oozie_server2'," +
@@ -193,6 +240,39 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
       "          ]" +
       "        }" +
       "      ]" +
+      "    }," +
+      "    {" +
+      "      'name': 'HDFS'," +
+      "      'identities': [" +
+      "            {" +
+      "              'name': 'hdfs-service'" +
+      "            }," +
+      "            {" +
+      "              'name': 'shared'," +
+      "              'principal': { 'value': 'oozie/_HOST@EXAMPLE.COM' }" +
+      "            }," +
+      "            {" +
+      "              'name': '/YARN/RESOURCE_MANAGER/rm'" +
+      "            }," +
+      "          ]," +
+      "      'components': [" +
+      "        {" +
+      "          'name': 'NAMENODE'," +
+      "          'identities': [" +
+      "            {" +
+      "              'name': 'namenode'" +
+      "            }" +
+      "          ]" +
+      "        }," +
+      "        {" +
+      "          'name': 'DATANODE'," +
+      "          'identities': [" +
+      "            {" +
+      "              'name': 'datanode'" +
+      "            }" +
+      "          ]" +
+      "        }" +
+      "      ]" +
       "    }" +
       "  ]" +
       "}");

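For orientation, the new tests above hinge on a host-awareness rule: a component's Kerberos identities only become candidates for deletion once no other host still runs that component, and service-level identities are only deleted once the whole service is gone. Below is a minimal illustrative check written against plain collections, assuming a map of component name to the hosts still running it; the names and shape are assumptions for the sketch, not the KerberosIdentityCleaner implementation.

    import java.util.Collections;
    import java.util.Map;
    import java.util.Set;

    final class HostAwarenessSketch {
      // True when the uninstalled host was the last one running the component,
      // i.e. its component-level identities are now safe to remove.
      static boolean lastInstanceRemoved(Map<String, Set<String>> hostsByComponent,
                                         String componentName, String removedHost) {
        Set<String> hosts = hostsByComponent.getOrDefault(componentName, Collections.<String>emptySet());
        return hosts.isEmpty() || (hosts.size() == 1 && hosts.contains(removedHost));
      }
    }
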
http://git-wip-us.apache.org/repos/asf/ambari/blob/e767aa44/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
index 406349a..80cb4dc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
@@ -17,6 +17,8 @@
  */
 package org.apache.ambari.server.orm.dao;
 
+import static java.util.Arrays.asList;
+
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -482,6 +484,16 @@ public class ServiceConfigDAOTest {
     Assert.assertTrue(entity.isSelected());
   }
 
+  @Test
+  public void testGetLatestClusterConfigsWithTypes() throws Exception {
+    initClusterEntities();
+    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
+    List<ClusterConfigEntity> entities = clusterDAO.getLatestConfigurationsWithTypes(clusterEntity.getClusterId(), HDP_01, asList("oozie-site"));
+    Assert.assertEquals(1, entities.size());
+    entities = clusterDAO.getLatestConfigurationsWithTypes(clusterEntity.getClusterId(), HDP_01, asList("no-such-type"));
+    Assert.assertTrue(entities.isEmpty());
+  }
+
   /**
    * Tests getting latest and enabled configurations when there is a
    * configuration group. Configurations for configuration groups are not

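The new DAO test above pins down the contract of getLatestConfigurationsWithTypes: from a cluster's latest configurations for a given stack, return only the entries whose type is in the requested list, and an empty list when nothing matches. Here is a semantics-only sketch of that filter over already-loaded entities -- an in-memory illustration, not the JPA query the DAO issues; getType() on ClusterConfigEntity is assumed for the sketch.

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;

    import org.apache.ambari.server.orm.entities.ClusterConfigEntity;

    final class LatestConfigTypeFilterSketch {
      // Keep only the latest configs whose type was asked for, e.g. "oozie-site".
      static List<ClusterConfigEntity> onlyTypes(Collection<ClusterConfigEntity> latestConfigs,
                                                 Collection<String> types) {
        List<ClusterConfigEntity> matched = new ArrayList<>();
        for (ClusterConfigEntity config : latestConfigs) {
          if (types.contains(config.getType())) {
            matched.add(config);
          }
        }
        return matched;
      }
    }
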

[13/31] ambari git commit: AMBARI-21210 ADDENDUM Add ability to Log Search to test a log entry if it is parseable (mgergely)

Posted by ab...@apache.org.
AMBARI-21210 ADDENDUM Add ability to Log Search to test a log entry if it is parseable (mgergely)

Change-Id: Icb847dc5cc9b6f63eb02cffe8046c78be0e585dc


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c0882898
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c0882898
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c0882898

Branch: refs/heads/branch-feature-logsearch-ui
Commit: c0882898deed4b6f0ecbd6f12cd935dc6b75cfdf
Parents: 3c9f125
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Mon Jul 10 14:45:41 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Mon Jul 10 14:45:41 2017 +0200

----------------------------------------------------------------------
 .../org/apache/ambari/logfeeder/common/LogEntryParseTester.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c0882898/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java
index 97bc3a2..5356159 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/LogEntryParseTester.java
@@ -76,7 +76,7 @@ public class LogEntryParseTester {
     ConfigHandler configHandler = new ConfigHandler();
     Input input = configHandler.getTestInput(inputConfig, logId);
     final Map<String, Object> result = new HashMap<>();
-    input.init();
+    input.getFirstFilter().init();
     input.addOutput(new Output() {
       @Override
       public void write(String block, InputMarker inputMarker) throws Exception {

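For context on the one-line fix above: input.init() is replaced with input.getFirstFilter().init(), which (as an assumption, not stated in the commit) initializes only the parsing filter chain rather than the test input's real source. A condensed sketch of the surrounding flow, using only the calls visible in the diff; inputConfig, logId and result are the tester's own fields, and the body of write() here is a hypothetical capture rather than the tester's actual handling.

    Input input = configHandler.getTestInput(inputConfig, logId);
    input.getFirstFilter().init();            // bring up the parse chain only
    input.addOutput(new Output() {
      @Override
      public void write(String block, InputMarker inputMarker) throws Exception {
        result.put("parsedBlock", block);     // hypothetical: stash whatever the filters emit
      }
    });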

[05/31] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
index 05cb78a..cafbede 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-default.json
@@ -1,55 +1,55 @@
 {
     "localComponents": [
-        "SECONDARY_NAMENODE", 
-        "HDFS_CLIENT", 
-        "DATANODE", 
-        "NAMENODE", 
-        "ZOOKEEPER_SERVER", 
-        "ZOOKEEPER_CLIENT", 
-        "RANGER_USERSYNC", 
-        "RANGER_ADMIN", 
+        "SECONDARY_NAMENODE",
+        "HDFS_CLIENT",
+        "DATANODE",
+        "NAMENODE",
+        "ZOOKEEPER_SERVER",
+        "ZOOKEEPER_CLIENT",
+        "RANGER_USERSYNC",
+        "RANGER_ADMIN",
         "RANGER_TAGSYNC",
         "RANGER_KMS_SERVER"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
-        "ranger-tagsync-site": {}, 
-        "zoo.cfg": {}, 
-        "hadoop-policy": {}, 
-        "hdfs-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
+        },
+        "ranger-tagsync-site": {},
+        "zoo.cfg": {},
+        "hadoop-policy": {},
+        "hdfs-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
-        "usersync-properties": {}, 
-        "zookeeper-env": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
+        "usersync-properties": {},
+        "zookeeper-env": {},
         "cluster-env": {},
         "dbks-site": {},
         "kms-env": {},
@@ -60,744 +60,744 @@
         "ranger-kms-site": {},
         "ranger-kms-policymgr-ssl": {},
         "ranger-kms-audit": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "9-1", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER_KMS", 
-    "role": "RANGER_KMS_SERVER", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 9, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "9-1",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER_KMS",
+    "role": "RANGER_KMS_SERVER",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 9,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "c1", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 64, 
-    "roleParams": {}, 
+    },
+    "clusterName": "c1",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 64,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466427664617"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1466427664621"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466427664617"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1466427664621"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466427664621"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466427664621"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466427664621"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1466427664621"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1466427664621"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466427664617"
-        }, 
+        },
         "core-site": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466427664621"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466427664621"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466427664621"
-        }, 
+        },
         "ranger-hdfs-security": {
             "tag": "version1466427664617"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466427664621"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1"
-        }, 
+        },
         "cluster-env": {
             "tag": "version1"
         },
         "dbks-site": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "kms-env": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "kms-log4j": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "kms-properties": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "kms-site": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "ranger-kms-security": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "ranger-kms-site": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "ranger-kms-policymgr-ssl": {
-            "tag": "version1"            
+            "tag": "version1"
             },
         "ranger-kms-audit": {
-            "tag": "version1"            
+            "tag": "version1"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
-        "current_version": "2.5.0.0-777", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
-        "agent_stack_retry_count": "5", 
-        "stack_version": "2.5", 
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
+        "current_version": "2.5.0.0-777",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+        "agent_stack_retry_count": "5",
+        "stack_version": "2.5",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
         "java_home": "/usr/jdk64/jdk1.7.0_45",
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
-        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-777\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-776\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
-        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]", 
-        "db_name": "ambari", 
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-777\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-776\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
+        "db_name": "ambari",
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
         "jce_name": "UnlimitedJCEPolicyJDK7.zip",
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_usersync.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "version": "2.5.0.0-777", 
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_usersync.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "version": "2.5.0.0-777",
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 1, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 1,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.125.4"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "ranger_kms_server_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
             "xasecure.audit.destination.solr.zookeepers": "c6401.ambari.apache.org:2181/ranger_audits",
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
-            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", 
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
             "xasecure.audit.destination.hdfs": "true",
-            "xasecure.audit.destination.solr": "true", 
-            "xasecure.audit.provider.summary.enabled": "false", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.destination.solr": "true",
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
-            "ranger.admin.kerberos.cookie.domain": "", 
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.admin.kerberos.cookie.domain": "",
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
             "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
-            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ranger_audits", 
-            "ranger.lookup.kerberos.principal": "", 
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
-            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
+            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ranger_audits",
+            "ranger.lookup.kerberos.principal": "",
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
             "atlas.kafka.bootstrap.servers": "localhost:6667"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!qLEQwP24KVlWY", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!qLEQwP24KVlWY",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
-        }, 
+        },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:50010", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.datanode.http.address": "0.0.0.0:50075", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:50010",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.heartbeat.interval": "3",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.datanode.http.address": "0.0.0.0:50075",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
-            "ranger.tagsync.atlas.to.ranger.service.mapping": "", 
-            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.kerberos.principal": "", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.atlas.custom.resource.mappers": "", 
-            "ranger.tagsync.kerberos.keytab": "", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.atlas.to.ranger.service.mapping": "",
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.kerberos.principal": "",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.atlas.custom.resource.mappers": "",
+            "ranger.tagsync.kerberos.keytab": "",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "core-site": {
-            "hadoop.proxyuser.root.hosts": "*", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "fs.trash.interval": "360", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "hadoop.security.authentication": "simple", 
-            "hadoop.proxyuser.root.groups": "*", 
-            "ipc.client.connection.maxidletime": "30000", 
-            "hadoop.security.key.provider.path": "", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.security.authorization": "false", 
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "ipc.server.tcpnodelay": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "DEFAULT", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "ipc.client.idlethreshold": "8000", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "hadoop.proxyuser.root.hosts": "*",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "fs.trash.interval": "360",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "hadoop.security.authentication": "simple",
+            "hadoop.proxyuser.root.groups": "*",
+            "ipc.client.connection.maxidletime": "30000",
+            "hadoop.security.key.provider.path": "",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.security.authorization": "false",
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "ipc.server.tcpnodelay": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "DEFAULT",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "ipc.client.idlethreshold": "8000",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
             "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
-        }, 
+        },
         "ssl-server": {
-            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", 
-            "ssl.server.keystore.keypassword": "bigdata", 
-            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", 
-            "ssl.server.keystore.password": "bigdata", 
-            "ssl.server.truststore.password": "bigdata", 
-            "ssl.server.truststore.type": "jks", 
-            "ssl.server.keystore.type": "jks", 
+            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
+            "ssl.server.keystore.keypassword": "bigdata",
+            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
+            "ssl.server.keystore.password": "bigdata",
+            "ssl.server.truststore.password": "bigdata",
+            "ssl.server.truststore.type": "jks",
+            "ssl.server.keystore.type": "jks",
             "ssl.server.truststore.reload.interval": "10000"
-        }, 
-        "ranger-site": {}, 
+        },
+        "ranger-site": {},
         "admin-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
 ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] 
 %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
-        }, 
+        },
         "tagsync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
-        }, 
+        },
         "ranger-hdfs-security": {
-            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
-            "ranger.plugin.hdfs.service.name": "{{repo_name}}", 
-            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
-            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", 
-            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", 
-            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", 
+            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+            "ranger.plugin.hdfs.service.name": "{{repo_name}}",
+            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
+            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
+            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
+            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
             "xasecure.add-hadoop-authorization": "true"
-        }, 
-        "usersync-properties": {}, 
+        },
+        "usersync-properties": {},
         "zookeeper-env": {
-            "zk_log_dir": "/var/log/zookeeper", 
-            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
-            "zk_server_heapsize": "1024m", 
-            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper",
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk

<TRUNCATED>

[07/31] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
index a1d930c..fb77531 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-admin-default.json
@@ -1,150 +1,150 @@
 {
     "localComponents": [
-        "NAMENODE", 
-        "SECONDARY_NAMENODE", 
-        "ZOOKEEPER_SERVER", 
-        "DATANODE", 
-        "HDFS_CLIENT", 
-        "ZOOKEEPER_CLIENT", 
-        "RANGER_USERSYNC", 
-        "RANGER_ADMIN", 
+        "NAMENODE",
+        "SECONDARY_NAMENODE",
+        "ZOOKEEPER_SERVER",
+        "DATANODE",
+        "HDFS_CLIENT",
+        "ZOOKEEPER_CLIENT",
+        "RANGER_USERSYNC",
+        "RANGER_ADMIN",
         "RANGER_TAGSYNC",
         "LOGSEARCH_SOLR",
         "LOGSEARCH_SOLR_CLIENT"
-    ], 
+    ],
     "configuration_attributes": {
-        "ranger-hdfs-audit": {}, 
-        "ssl-client": {}, 
-        "ranger-admin-site": {}, 
-        "ranger-hdfs-policymgr-ssl": {}, 
-        "tagsync-application-properties": {}, 
-        "ranger-env": {}, 
-        "usersync-log4j": {}, 
-        "admin-properties": {}, 
-        "ranger-ugsync-site": {}, 
+        "ranger-hdfs-audit": {},
+        "ssl-client": {},
+        "ranger-admin-site": {},
+        "ranger-hdfs-policymgr-ssl": {},
+        "tagsync-application-properties": {},
+        "ranger-env": {},
+        "usersync-log4j": {},
+        "admin-properties": {},
+        "ranger-ugsync-site": {},
         "hdfs-site": {
             "final": {
-                "dfs.datanode.data.dir": "true", 
-                "dfs.namenode.http-address": "true", 
-                "dfs.datanode.failed.volumes.tolerated": "true", 
-                "dfs.support.append": "true", 
-                "dfs.namenode.name.dir": "true", 
+                "dfs.datanode.data.dir": "true",
+                "dfs.namenode.http-address": "true",
+                "dfs.datanode.failed.volumes.tolerated": "true",
+                "dfs.support.append": "true",
+                "dfs.namenode.name.dir": "true",
                 "dfs.webhdfs.enabled": "true"
             }
-        }, 
-        "ranger-tagsync-site": {}, 
-        "zoo.cfg": {}, 
-        "hadoop-policy": {}, 
-        "hdfs-log4j": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
+        },
+        "ranger-tagsync-site": {},
+        "zoo.cfg": {},
+        "hadoop-policy": {},
+        "hdfs-log4j": {},
+        "ranger-hdfs-plugin-properties": {},
         "core-site": {
             "final": {
                 "fs.defaultFS": "true"
             }
-        }, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "ssl-server": {}, 
-        "ranger-site": {}, 
-        "admin-log4j": {}, 
-        "tagsync-log4j": {}, 
-        "ranger-hdfs-security": {}, 
-        "usersync-properties": {}, 
+        },
+        "hadoop-env": {},
+        "zookeeper-log4j": {},
+        "ssl-server": {},
+        "ranger-site": {},
+        "admin-log4j": {},
+        "tagsync-log4j": {},
+        "ranger-hdfs-security": {},
+        "usersync-properties": {},
         "zookeeper-env": {},
         "infra-solr-env": {},
         "infra-solr-client-log4j": {},
         "cluster-env": {}
-    }, 
-    "public_hostname": "c6401.ambari.apache.org", 
-    "commandId": "11-0", 
-    "hostname": "c6401.ambari.apache.org", 
-    "kerberosCommandParams": [], 
-    "serviceName": "RANGER", 
-    "role": "RANGER_ADMIN", 
-    "forceRefreshConfigTagsBeforeExecution": [], 
-    "requestId": 11, 
+    },
+    "public_hostname": "c6401.ambari.apache.org",
+    "commandId": "11-0",
+    "hostname": "c6401.ambari.apache.org",
+    "kerberosCommandParams": [],
+    "serviceName": "RANGER",
+    "role": "RANGER_ADMIN",
+    "forceRefreshConfigTagsBeforeExecution": [],
+    "requestId": 11,
     "agentConfigParams": {
         "agent": {
             "parallel_execution": 0
         }
-    }, 
-    "clusterName": "c1", 
-    "commandType": "EXECUTION_COMMAND", 
-    "taskId": 31, 
-    "roleParams": {}, 
+    },
+    "clusterName": "c1",
+    "commandType": "EXECUTION_COMMAND",
+    "taskId": 31,
+    "roleParams": {},
     "configurationTags": {
         "ranger-hdfs-audit": {
             "tag": "version1466705299922"
-        }, 
+        },
         "ssl-client": {
             "tag": "version1"
-        }, 
+        },
         "ranger-admin-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
             "tag": "version1466705299922"
-        }, 
+        },
         "tagsync-application-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-env": {
             "tag": "version1466705299949"
-        }, 
+        },
         "usersync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-ugsync-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "hdfs-site": {
             "tag": "version1"
-        }, 
+        },
         "ranger-tagsync-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zoo.cfg": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-policy": {
             "tag": "version1"
-        }, 
+        },
         "hdfs-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
             "tag": "version1466705299922"
-        }, 
+        },
         "core-site": {
             "tag": "version1"
-        }, 
+        },
         "hadoop-env": {
             "tag": "version1"
-        }, 
+        },
         "zookeeper-log4j": {
             "tag": "version1"
-        }, 
+        },
         "ssl-server": {
             "tag": "version1"
-        }, 
+        },
         "ranger-site": {
             "tag": "version1466705299949"
-        }, 
+        },
         "admin-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "tagsync-log4j": {
             "tag": "version1466705299949"
-        }, 
+        },
         "ranger-hdfs-security": {
             "tag": "version1466705299922"
-        }, 
+        },
         "usersync-properties": {
             "tag": "version1466705299949"
-        }, 
+        },
         "zookeeper-env": {
             "tag": "version1"
         },
@@ -157,492 +157,492 @@
         "cluster-env": {
             "tag": "version1"
         }
-    }, 
-    "roleCommand": "START", 
+    },
+    "roleCommand": "START",
     "hostLevelParams": {
-        "agent_stack_retry_on_unavailability": "false", 
-        "stack_name": "HDP", 
-        "package_version": "2_5_0_0_*", 
+        "agent_stack_retry_on_unavailability": "false",
+        "stack_name": "HDP",
+        "package_version": "2_5_0_0_*",
         "custom_mysql_jdbc_name": "mysql-connector-java.jar",
         "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
-        "host_sys_prepped": "false", 
-        "ambari_db_rca_username": "mapred", 
-        "current_version": "2.5.0.0-801", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
-        "agent_stack_retry_count": "5", 
-        "stack_version": "2.5", 
-        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
-        "java_home": "/usr/jdk64/jdk1.7.0_45", 
-        "repository_version_id": "1", 
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "java_version": "8", 
-        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
-        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]", 
-        "db_name": "ambari", 
-        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "ambari_db_rca_password": "mapred", 
-        "jce_name": "jce_policy-8.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "host_sys_prepped": "false",
+        "ambari_db_rca_username": "mapred",
+        "current_version": "2.5.0.0-801",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+        "agent_stack_retry_count": "5",
+        "stack_version": "2.5",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "repository_version_id": "1",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "not_managed_hdfs_path_list": "[\"/tmp\"]",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+        "java_version": "8",
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]",
+        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]",
+        "db_name": "ambari",
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "ambari_db_rca_password": "mapred",
+        "jce_name": "jce_policy-8.zip",
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]",
         "clientsToUpdateConfigs": "[\"*\"]"
-    }, 
+    },
     "commandParams": {
-        "service_package_folder": "common-services/RANGER/0.4.0/package", 
-        "script": "scripts/ranger_admin.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "version": "2.5.0.0-801", 
-        "max_duration_for_retries": "0", 
-        "command_retry_enabled": "false", 
-        "command_timeout": "600", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package",
+        "script": "scripts/ranger_admin.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "version": "2.5.0.0-801",
+        "max_duration_for_retries": "0",
+        "command_retry_enabled": "false",
+        "command_timeout": "600",
         "script_type": "PYTHON"
-    }, 
-    "forceRefreshConfigTags": [], 
-    "stageId": 0, 
+    },
+    "forceRefreshConfigTags": [],
+    "stageId": 0,
     "clusterHostInfo": {
         "snamenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_use_ssl": [
             "false"
-        ], 
+        ],
         "all_ping_ports": [
             "8670"
-        ], 
+        ],
         "ranger_tagsync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ranger_usersync_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "slave_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "namenode_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "ambari_server_port": [
             "8080"
-        ], 
+        ],
         "ranger_admin_hosts": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "all_racks": [
             "/default-rack"
-        ], 
+        ],
         "all_ipv4_ips": [
             "172.22.83.73"
-        ], 
+        ],
         "ambari_server_host": [
             "c6401.ambari.apache.org"
-        ], 
+        ],
         "zookeeper_hosts": [
             "c6401.ambari.apache.org"
         ],
         "infra_solr_hosts": [
             "c6401.ambari.apache.org"
         ]
-    }, 
+    },
     "configurations": {
         "ranger-hdfs-audit": {
-            "xasecure.audit.destination.solr.zookeepers": "NONE", 
-            "xasecure.audit.destination.solr.urls": "", 
-            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
-            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE",
+            "xasecure.audit.destination.solr.urls": "",
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
             "xasecure.audit.destination.hdfs": "true",
-            "xasecure.audit.destination.solr": "false", 
+            "xasecure.audit.destination.solr": "false",
             "xasecure.audit.provider.summary.enabled": "false",
             "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
             "xasecure.audit.is.enabled": "true"
-        }, 
+        },
         "ssl-client": {
-            "ssl.client.truststore.reload.interval": "10000", 
-            "ssl.client.keystore.password": "bigdata", 
-            "ssl.client.truststore.type": "jks", 
-            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
-            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
-            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.truststore.reload.interval": "10000",
+            "ssl.client.keystore.password": "bigdata",
+            "ssl.client.truststore.type": "jks",
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+            "ssl.client.truststore.password": "bigdata",
             "ssl.client.keystore.type": "jks"
-        }, 
+        },
         "ranger-admin-site": {
             "ranger.admin.kerberos.cookie.domain": "",
-            "ranger.kms.service.user.hdfs": "hdfs", 
-            "ranger.spnego.kerberos.principal": "", 
-            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.plugins.hive.serviceuser": "hive", 
-            "ranger.lookup.kerberos.keytab": "", 
-            "ranger.plugins.kms.serviceuser": "kms", 
-            "ranger.service.https.attrib.ssl.enabled": "false", 
-            "ranger.sso.browser.useragent": "Mozilla,chrome", 
-            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
-            "ranger.plugins.hbase.serviceuser": "hbase", 
-            "ranger.plugins.hdfs.serviceuser": "hdfs", 
-            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
-            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
-            "ranger.plugins.knox.serviceuser": "knox", 
-            "ranger.ldap.base.dn": "dc=example,dc=com", 
-            "ranger.sso.publicKey": "", 
-            "ranger.admin.kerberos.cookie.path": "/", 
-            "ranger.service.https.attrib.clientAuth": "want", 
-            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
-            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
-            "ranger.ldap.group.roleattribute": "cn", 
-            "ranger.plugins.kafka.serviceuser": "kafka", 
-            "ranger.admin.kerberos.principal": "", 
-            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks", 
-            "ranger.ldap.referral": "ignore", 
-            "ranger.service.http.port": "6080", 
-            "ranger.ldap.user.searchfilter": "(uid={0})", 
-            "ranger.plugins.atlas.serviceuser": "atlas", 
-            "ranger.truststore.password": "changeit", 
-            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.password": "NONE", 
+            "ranger.kms.service.user.hdfs": "hdfs",
+            "ranger.spnego.kerberos.principal": "",
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
+            "ranger.plugins.hive.serviceuser": "hive",
+            "ranger.lookup.kerberos.keytab": "",
+            "ranger.plugins.kms.serviceuser": "kms",
+            "ranger.service.https.attrib.ssl.enabled": "false",
+            "ranger.sso.browser.useragent": "Mozilla,chrome",
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01",
+            "ranger.plugins.hbase.serviceuser": "hbase",
+            "ranger.plugins.hdfs.serviceuser": "hdfs",
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
+            "ranger.plugins.knox.serviceuser": "knox",
+            "ranger.ldap.base.dn": "dc=example,dc=com",
+            "ranger.sso.publicKey": "",
+            "ranger.admin.kerberos.cookie.path": "/",
+            "ranger.service.https.attrib.clientAuth": "want",
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}",
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
+            "ranger.ldap.group.roleattribute": "cn",
+            "ranger.plugins.kafka.serviceuser": "kafka",
+            "ranger.admin.kerberos.principal": "",
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
+            "ranger.ldap.referral": "ignore",
+            "ranger.service.http.port": "6080",
+            "ranger.ldap.user.searchfilter": "(uid={0})",
+            "ranger.plugins.atlas.serviceuser": "atlas",
+            "ranger.truststore.password": "changeit",
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.password": "NONE",
             "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr",
-            "ranger.lookup.kerberos.principal": "", 
-            "ranger.service.https.port": "6182", 
-            "ranger.plugins.storm.serviceuser": "storm", 
-            "ranger.externalurl": "{{ranger_external_url}}", 
-            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.kms.service.user.hive": "", 
-            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
-            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
-            "ranger.service.host": "{{ranger_host}}", 
-            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
-            "ranger.service.https.attrib.keystore.pass": "xasecure", 
-            "ranger.unixauth.remote.login.enabled": "true", 
-            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
-            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
-            "ranger.audit.solr.username": "ranger_solr", 
-            "ranger.sso.enabled": "false", 
-            "ranger.audit.solr.urls": "", 
-            "ranger.ldap.ad.domain": "", 
-            "ranger.plugins.yarn.serviceuser": "yarn", 
-            "ranger.audit.source.type": "solr", 
-            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
-            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
-            "ranger.authentication.method": "UNIX", 
-            "ranger.service.http.enabled": "true", 
-            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
-            "ranger.ldap.ad.referral": "ignore", 
-            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
-            "ranger.jpa.jdbc.password": "_", 
-            "ranger.spnego.kerberos.keytab": "", 
-            "ranger.sso.providerurl": "", 
-            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
-            "ranger.admin.kerberos.keytab": "", 
-            "ranger.admin.kerberos.token.valid.seconds": "30", 
-            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.lookup.kerberos.principal": "",
+            "ranger.service.https.port": "6182",
+            "ranger.plugins.storm.serviceuser": "storm",
+            "ranger.externalurl": "{{ranger_external_url}}",
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.kms.service.user.hive": "",
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
+            "ranger.service.host": "{{ranger_host}}",
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
+            "ranger.service.https.attrib.keystore.pass": "xasecure",
+            "ranger.unixauth.remote.login.enabled": "true",
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
+            "ranger.audit.solr.username": "ranger_solr",
+            "ranger.sso.enabled": "false",
+            "ranger.audit.solr.urls": "",
+            "ranger.ldap.ad.domain": "",
+            "ranger.plugins.yarn.serviceuser": "yarn",
+            "ranger.audit.source.type": "solr",
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}",
+            "ranger.authentication.method": "UNIX",
+            "ranger.service.http.enabled": "true",
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
+            "ranger.ldap.ad.referral": "ignore",
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com",
+            "ranger.jpa.jdbc.password": "_",
+            "ranger.spnego.kerberos.keytab": "",
+            "ranger.sso.providerurl": "",
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}",
+            "ranger.admin.kerberos.keytab": "",
+            "ranger.admin.kerberos.token.valid.seconds": "30",
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
             "ranger.unixauth.service.port": "5151"
-        }, 
+        },
         "ranger-hdfs-policymgr-ssl": {
-            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
-            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
-            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
-            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
-            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
+            "xasecure.policymgr.clientssl.truststore.password": "changeit",
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
             "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
-        }, 
+        },
         "tagsync-application-properties": {
-            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
-            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181",
             "atlas.kafka.bootstrap.servers": "localhost:6667"
-        }, 
+        },
         "ranger-env": {
-            "ranger_solr_shards": "1", 
-            "ranger_solr_config_set": "ranger_audits", 
-            "ranger_user": "ranger", 
+            "ranger_solr_shards": "1",
+            "ranger_solr_config_set": "ranger_audits",
+            "ranger_user": "ranger",
             "ranger_solr_replication_factor": "1",
-            "xml_configurations_supported": "true", 
-            "ranger-atlas-plugin-enabled": "No", 
-            "ranger-hbase-plugin-enabled": "No", 
-            "ranger-yarn-plugin-enabled": "No", 
-            "bind_anonymous": "false", 
-            "ranger_admin_username": "amb_ranger_admin", 
-            "admin_password": "admin", 
-            "is_solrCloud_enabled": "true", 
-            "ranger-storm-plugin-enabled": "No", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "ranger_group": "ranger", 
-            "ranger-knox-plugin-enabled": "No", 
-            "ranger_admin_log_dir": "/var/log/ranger/admin", 
-            "ranger-kafka-plugin-enabled": "No", 
-            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
-            "ranger-hive-plugin-enabled": "No", 
-            "xasecure.audit.destination.solr": "true", 
-            "ranger_pid_dir": "/var/run/ranger", 
-            "xasecure.audit.destination.hdfs": "true", 
-            "admin_username": "admin", 
-            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
-            "create_db_dbuser": "true", 
-            "ranger_solr_collection_name": "ranger_audits", 
-            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "xml_configurations_supported": "true",
+            "ranger-atlas-plugin-enabled": "No",
+            "ranger-hbase-plugin-enabled": "No",
+            "ranger-yarn-plugin-enabled": "No",
+            "bind_anonymous": "false",
+            "ranger_admin_username": "amb_ranger_admin",
+            "admin_password": "admin",
+            "is_solrCloud_enabled": "true",
+            "ranger-storm-plugin-enabled": "No",
+            "ranger-hdfs-plugin-enabled": "No",
+            "ranger_group": "ranger",
+            "ranger-knox-plugin-enabled": "No",
+            "ranger_admin_log_dir": "/var/log/ranger/admin",
+            "ranger-kafka-plugin-enabled": "No",
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306",
+            "ranger-hive-plugin-enabled": "No",
+            "xasecure.audit.destination.solr": "true",
+            "ranger_pid_dir": "/var/run/ranger",
+            "xasecure.audit.destination.hdfs": "true",
+            "admin_username": "admin",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "create_db_dbuser": "true",
+            "ranger_solr_collection_name": "ranger_audits",
+            "ranger_admin_password": "P1!q9xa96SMi5NCl",
             "ranger_usersync_log_dir": "/var/log/ranger/usersync"
-        }, 
+        },
         "usersync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
-        }, 
+        },
         "admin-properties": {
-            "db_user": "rangeradmin01", 
-            "DB_FLAVOR": "MYSQL", 
-            "db_password": "rangeradmin01", 
-            "db_root_user": "root", 
-            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
-            "db_name": "ranger01", 
-            "db_host": "c6401.ambari.apache.org", 
-            "db_root_password": "vagrant", 
+            "db_user": "rangeradmin01",
+            "DB_FLAVOR": "MYSQL",
+            "db_password": "rangeradmin01",
+            "db_root_user": "root",
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080",
+            "db_name": "ranger01",
+            "db_host": "c6401.ambari.apache.org",
+            "db_root_password": "vagrant",
             "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
-        }, 
+        },
         "ranger-ugsync-site": {
-            "ranger.usersync.ldap.binddn": "", 
-            "ranger.usersync.policymgr.username": "rangerusersync", 
-            "ranger.usersync.policymanager.mockrun": "false", 
-            "ranger.usersync.group.searchbase": "", 
-            "ranger.usersync.ldap.bindalias": "testldapalias", 
-            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
-            "ranger.usersync.port": "5151", 
-            "ranger.usersync.pagedresultssize": "500", 
-            "ranger.usersync.group.memberattributename": "", 
-            "ranger.usersync.kerberos.principal": "", 
-            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
-            "ranger.usersync.ldap.referral": "ignore", 
-            "ranger.usersync.group.searchfilter": "", 
-            "ranger.usersync.ldap.user.objectclass": "person", 
-            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
-            "ranger.usersync.ldap.user.searchfilter": "", 
-            "ranger.usersync.ldap.groupname.caseconversion": "none", 
-            "ranger.usersync.ldap.ldapbindpassword": "", 
-            "ranger.usersync.unix.minUserId": "500", 
-            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
-            "ranger.usersync.group.nameattribute": "", 
-            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
-            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
-            "ranger.usersync.user.searchenabled": "false", 
-            "ranger.usersync.group.usermapsyncenabled": "true", 
-            "ranger.usersync.ldap.bindkeystore": "", 
-            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
-            "ranger.usersync.kerberos.keytab": "", 
-            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
-            "ranger.usersync.group.objectclass": "", 
-            "ranger.usersync.ldap.user.searchscope": "sub", 
-            "ranger.usersync.unix.password.file": "/etc/passwd", 
-            "ranger.usersync.ldap.user.nameattribute": "", 
-            "ranger.usersync.pagedresultsenabled": "true", 
-            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
-            "ranger.usersync.group.search.first.enabled": "false", 
-            "ranger.usersync.group.searchenabled": "false", 
-            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
-            "ranger.usersync.ssl": "true", 
-            "ranger.usersync.ldap.url": "", 
-            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
-            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.ldap.user.searchbase": "", 
-            "ranger.usersync.ldap.username.caseconversion": "none", 
-            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
-            "ranger.usersync.keystore.password": "UnIx529p", 
-            "ranger.usersync.unix.group.file": "/etc/group", 
-            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
-            "ranger.usersync.group.searchscope": "", 
-            "ranger.usersync.truststore.password": "changeit", 
-            "ranger.usersync.enabled": "true", 
-            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.ldap.binddn": "",
+            "ranger.usersync.policymgr.username": "rangerusersync",
+            "ranger.usersync.policymanager.mockrun": "false",
+            "ranger.usersync.group.searchbase": "",
+            "ranger.usersync.ldap.bindalias": "testldapalias",
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+            "ranger.usersync.port": "5151",
+            "ranger.usersync.pagedresultssize": "500",
+            "ranger.usersync.group.memberattributename": "",
+            "ranger.usersync.kerberos.principal": "",
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+            "ranger.usersync.ldap.referral": "ignore",
+            "ranger.usersync.group.searchfilter": "",
+            "ranger.usersync.ldap.user.objectclass": "person",
+            "ranger.usersync.logdir": "{{usersync_log_dir}}",
+            "ranger.usersync.ldap.user.searchfilter": "",
+            "ranger.usersync.ldap.groupname.caseconversion": "none",
+            "ranger.usersync.ldap.ldapbindpassword": "",
+            "ranger.usersync.unix.minUserId": "500",
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+            "ranger.usersync.group.nameattribute": "",
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+            "ranger.usersync.user.searchenabled": "false",
+            "ranger.usersync.group.usermapsyncenabled": "true",
+            "ranger.usersync.ldap.bindkeystore": "",
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+            "ranger.usersync.kerberos.keytab": "",
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+            "ranger.usersync.group.objectclass": "",
+            "ranger.usersync.ldap.user.searchscope": "sub",
+            "ranger.usersync.unix.password.file": "/etc/passwd",
+            "ranger.usersync.ldap.user.nameattribute": "",
+            "ranger.usersync.pagedresultsenabled": "true",
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+            "ranger.usersync.group.search.first.enabled": "false",
+            "ranger.usersync.group.searchenabled": "false",
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+            "ranger.usersync.ssl": "true",
+            "ranger.usersync.ldap.url": "",
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.ldap.user.searchbase": "",
+            "ranger.usersync.ldap.username.caseconversion": "none",
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+            "ranger.usersync.keystore.password": "UnIx529p",
+            "ranger.usersync.unix.group.file": "/etc/group",
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+            "ranger.usersync.group.searchscope": "",
+            "ranger.usersync.truststore.password": "changeit",
+            "ranger.usersync.enabled": "true",
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
             "ranger.usersync.filesource.text.delimiter": ","
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.content-summary.limit": "5000", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:50010", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.namenode.audit.log.async": "true", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
-            "dfs.blocksize": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.namenode.fslock.fair": "false", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "50", 
-            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
-            "nfs.exports.allowed.hosts": "* rw", 
-            "dfs.datanode.http.address": "0.0.0.0:50075", 
-            "dfs.datanode.du.reserved": "33011188224", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.client.retry.policy.enabled": "false", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.encryption.key.provider.uri": "", 
-            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.content-summary.limit": "5000",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:50010",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.namenode.audit.log.async": "true",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020",
+            "dfs.permissions.enabled": "true",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.https.port": "50470",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+            "dfs.blocksize": "134217728",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.namenode.fslock.fair": "false",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.heartbeat.interval": "3",
+            "dfs.replication": "3",
+            "dfs.namenode.handler.count": "50",
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary",
+            "fs.permissions.umask-mode": "022",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090",
+            "nfs.exports.allowed.hosts": "* rw",
+            "dfs.datanode.http.address": "0.0.0.0:50075",
+            "dfs.datanode.du.reserved": "33011188224",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.http.policy": "HTTP_ONLY",
+            "dfs.block.access.token.enable": "true",
+            "dfs.client.retry.policy.enabled": "false",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.encryption.key.provider.uri": "",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
-        }, 
+        },
         "ranger-tagsync-site": {
-            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
-            "ranger.tagsync.source.atlasrest.username": "", 
-            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
-            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
-            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
-            "ranger.tagsync.source.file.check.interval.millis": "", 
-            "ranger.tagsync.source.atlasrest.endpoint": "", 
-            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
-            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
-            "ranger.tagsync.kerberos.principal": "", 
-            "ranger.tagsync.kerberos.keytab": "", 
-            "ranger.tagsync.source.atlas": "false", 
-            "ranger.tagsync.source.atlasrest": "false", 
-            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.source.atlasrest.username": "",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "",
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.source.file.check.interval.millis": "",
+            "ranger.tagsync.source.atlasrest.endpoint": "",
+            "ranger.tagsync.dest.ranger.username": "rangertagsync",
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
+            "ranger.tagsync.kerberos.principal": "",
+            "ranger.tagsync.kerberos.keytab": "",
+            "ranger.tagsync.source.atlas": "false",
+            "ranger.tagsync.source.atlasrest": "false",
+            "ranger.tagsync.source.file": "false",
             "ranger.tagsync.source.file.filename": ""
-        }, 
+        },
         "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/grid/0/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
+            "clientPort": "2181",
+            "autopurge.purgeInterval": "24",
+            "syncLimit": "5",
+            "dataDir": "/grid/0/hadoop/zookeeper",
+            "initLimit": "10",
+            "tickTime": "2000",
             "autopurge.snapRetainCount": "30"
-        }, 
+        },
         "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
             "security.inter.datanode.protocol.acl": "*"
-        }, 
+        },
         "hdfs-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
-        }, 
+        },
         "ranger-hdfs-plugin-properties": {
-            "hadoop.rpc.protection": "authentication", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "policy_user": "ambari-qa", 
-            "common.name.for.certificate": "", 
+            "hadoop.rpc.protection": "authentication",
+            "ranger-hdfs-plugin-enabled": "No",
+            "REPOSITORY_CONFIG_USERNAME": "hadoop",
+            "policy_user": "ambari-qa",
+            "common.name.for.certificate": "",
             "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
+        },
         "core-site": {
-            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "fs.trash.interval": "360", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "hadoop.security.authentication": "simple", 
-            "hadoop.proxyuser.root.groups": "*", 
-            "ipc.client.connection.maxidletime": "30000", 
-            "hadoop.security.key.provider.path": "", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.security.authorization": "false", 
-            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
-            "ipc.server.tcpnodelay": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "DEFAULT", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.hdfs.hosts": "*", 
-            "hadoop.proxyuser.hdfs.groups": "*", 
-            "ipc.client.idlethreshold": "8000", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org",
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "fs.trash.interval": "360",
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "hadoop.security.authentication": "simple",
+            "hadoop.proxyuser.root.groups": "*",
+            "ipc.client.connection.maxidletime": "30000",
+            "hadoop.security.key.provider.path": "",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.security.authorization": "false",
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
+            "ipc.server.tcpnodelay": "true",
+            "ipc.client.connect.max.retries": "50",
+            "hadoop.security.auth_to_local": "DEFAULT",
+            "io.file.buffer.size": "131072",
+            "hadoop.proxyuser.hdfs.hosts": "*",
+            "hadoop.proxyuser.hdfs.groups": "*",
+            "ipc.client.idlethreshold": "8000",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
             "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
-        }, 
+        },
         "hadoop-env": {
-            "keyserver_port": "", 
-            "proxyuser_group": "users", 
-            "hdfs_user_nproc_limit": "65536", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hdfs_user_nofile_limit": "128000", 
-            "hdfs_user": "hdfs", 
-            "keyserver_host": " ", 
-            "namenode_opt_maxnewsize": "128m", 
-            "namenode_opt_maxpermsize": "256m", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "128m", 
-            "nfsgateway_heapsize": "1024", 
-            "dtnode_heapsize": "1024m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "namenode_opt_permsize": "128m", 
+            "keyserver_port": "",
+            "proxyuser_group": "users",
+            "hdfs_user_nproc_limit": "65536",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "hdfs_user_nofile_limit": "128000",
+            "hdfs_user": "hdfs",
+            "keyserver_host": " ",
+            "namenode_opt_maxnewsize": "128m",
+            "namenode_opt_maxpermsize": "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}",
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "128m",
+            "nfsgateway_heapsize": "1024",
+            "dtnode_heapsize": "1024m",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "namenode_opt_permsize": "128m",
             "hdfs_tmp_dir": "/tmp"
-        }, 
+        },
         "zookeeper-log4j": {
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
-        }, 
+        },
         "ssl-server": {
-            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", 
-            "ssl.server.keystore.keypassword": "bigdata", 
-            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", 
-            "ssl.server.keystore.password": "bigdata", 
-            "ssl.server.truststore.password": "bigdata", 
-            "ssl.server.truststore.type": "jks", 
-            "ssl.server.keystore.type": "jks", 
+            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
+            "ssl.server.keystore.keypassword": "bigdata",
+            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
+            "ssl.server.keystore.password": "bigdata",
+            "ssl.server.truststore.password": "bigdata",
+            "ssl.server.truststore.type": "jks",
+            "ssl.server.keystore.type": "jks",
             "ssl.server.truststore.reload.interval": "10000"
-        }, 
-        "ranger-site": {}, 
+        },
+        "ranger-site": {},
         "admin-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
 ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] 
 %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
-        }, 
+        },
         "tagsync-log4j": {
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
-        }, 
+        },
         "ranger-hdfs-security": {
-            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
-            "ranger.plugin.hdfs.service.name": "{{repo_name}}", 
-            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
-            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", 
-            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", 
-            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", 
+            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
+            "ranger.plugin.hdfs.service.name": "{{repo_name}}",
+            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
+            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
+            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
+            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
             "xasecure.add-hadoop-authorization": "true"
-        }, 
-        "usersync-properties": {}, 
+        },
+        "usersync-properties": {},
         "zookeeper-env": {
-            "zk_log_dir": "/var/log/zookeeper", 
-            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
-            "zk_server_heapsize": "1024m", 
-            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper",
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+            "zk_server_heapsize": "1024m",
+            "zk_pid_dir": "/var/run/zookeeper",
             "zk_user": "zookeeper"
         },
         "infra-solr-env": {
@@ -651,7 +651,7 @@
             "infra_solr_kerberos_name_rules": "DEFAULT",
             "infra_solr_user": "infra-solr",
             "infra_solr_maxmem": "1024",
-            "content": "#!/bin/bash\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LI

<TRUNCATED>

[11/31] ambari git commit: AMBARI-20950. HdfsResource can not handle S3 URL when hbase.rootdir is set to S3 URL (aonishuk)

Posted by ab...@apache.org.
AMBARI-20950. HdfsResource can not handle S3 URL when hbase.rootdir is set to S3 URL (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7f3d3b21
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7f3d3b21
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7f3d3b21

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 7f3d3b21a961581678cb7c072ec71e5eb15d7da9
Parents: d0f7a51
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Mon Jul 10 12:58:10 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Jul 10 12:58:10 2017 +0300

----------------------------------------------------------------------
 .../HBASE/0.96.0.2.0/package/scripts/hbase.py           | 12 +++++++-----
 .../HBASE/0.96.0.2.0/package/scripts/params_linux.py    |  3 +++
 2 files changed, 10 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7f3d3b21/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
index 8ad802e..cec6b2a 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
@@ -17,6 +17,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from urlparse import urlparse
 import os
 import sys
 from resource_management.libraries.script.script import Script
@@ -200,11 +201,12 @@ def hbase(name=None):
       owner=params.hbase_user
     )
   if name == "master":
-    params.HdfsResource(params.hbase_hdfs_root_dir,
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hbase_user
-    )
+    if not params.hbase_hdfs_root_dir_protocol or params.hbase_hdfs_root_dir_protocol == urlparse(params.default_fs).scheme:
+      params.HdfsResource(params.hbase_hdfs_root_dir,
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.hbase_user
+      )
     params.HdfsResource(params.hbase_staging_dir,
                          type="directory",
                          action="create_on_execute",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7f3d3b21/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index d45aea6..e05da06 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -17,6 +17,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from urlparse import urlparse
+
 import status_params
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 
@@ -237,6 +239,7 @@ else:
 hbase_env_sh_template = config['configurations']['hbase-env']['content']
 
 hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
+hbase_hdfs_root_dir_protocol = urlparse(hbase_hdfs_root_dir).scheme
 hbase_staging_dir = "/apps/hbase/staging"
 #for create_hdfs_directory
 hostname = config["hostname"]


[27/31] ambari git commit: AMBARI-21471. ATS going down due to missing org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin (smohanty)

Posted by ab...@apache.org.
AMBARI-21471. ATS going down due to missing org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin (smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/853a5d4a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/853a5d4a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/853a5d4a

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 853a5d4a2eda1afb5ee4578cf99d0757abc5f95d
Parents: eb1adcb
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Thu Jul 13 22:35:28 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Thu Jul 13 22:38:40 2017 -0700

----------------------------------------------------------------------
 .../stacks/HDP/2.5/services/stack_advisor.py    |   5 +-
 .../src/main/resources/stacks/stack_advisor.py  |  19 ++-
 .../stacks/2.5/common/test_stack_advisor.py     | 150 +++++++++++--------
 3 files changed, 105 insertions(+), 69 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/853a5d4a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 3337e8e..4ca74ee 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -19,6 +19,7 @@ limitations under the License.
 
 import math
 
+
 from ambari_commons.str_utils import string_set_equals
 from resource_management.core.exceptions import Fail
 from resource_management.libraries.functions.get_bare_principal import get_bare_principal
@@ -774,9 +775,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
         self.checkAndStopLlapQueue(services, configurations, LLAP_QUEUE_NAME)
 
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
-    stack_root = "/usr/hdp"
-    if cluster_env and "stack_root" in cluster_env:
-      stack_root = cluster_env["stack_root"]
+    stack_root = self.getStackRoot(services)
 
     timeline_plugin_classes_values = []
     timeline_plugin_classpath_values = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/853a5d4a/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index 8e08d82..67f7fe0 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -25,6 +25,7 @@ import re
 import socket
 import string
 import traceback
+import json
 import sys
 import logging
 from math import ceil, floor
@@ -34,7 +35,6 @@ from urlparse import urlparse
 from resource_management.libraries.functions.data_structure_utils import get_from_dict
 from resource_management.core.exceptions import Fail
 
-
 class StackAdvisor(object):
   """
   Abstract class implemented by all stack advisors. Stack advisors advise on stack specific questions. 
@@ -2006,6 +2006,23 @@ class DefaultStackAdvisor(StackAdvisor):
 
     return mount_points
 
+  def getStackRoot(self, services):
+    """
+    Gets the stack root associated with the stack
+    :param services: the services structure containing the current configurations
+    :return: the stack root as specified in the config or /usr/hdp
+    """
+    cluster_env = self.getServicesSiteProperties(services, "cluster-env")
+    stack_root = "/usr/hdp"
+    if cluster_env and "stack_root" in cluster_env:
+      stack_root_as_str = cluster_env["stack_root"]
+      stack_roots = json.loads(stack_root_as_str)
+      stack_name = cluster_env["stack_name"]
+      if stack_name in stack_roots:
+        stack_root = stack_roots[stack_name]
+
+    return stack_root
+
   def isSecurityEnabled(self, services):
     """
     Determines if security is enabled by testing the value of cluster-env/security enabled.

http://git-wip-us.apache.org/repos/asf/ambari/blob/853a5d4a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index 50f527d..bf0cbec 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -825,70 +825,80 @@ class TestHDP25StackAdvisor(TestCase):
 
     services = {
       "services": [{
-        "StackServices": {
-          "service_name": "YARN",
-        },
-        "Versions": {
-          "stack_version": "2.5"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NODEMANAGER",
-              "hostnames": ["c6401.ambari.apache.org"]
-            }
-          }
-        ]
-      }, {
-        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
-        "StackServices": {
-          "service_name": "HIVE",
-          "service_version": "1.2.1.2.5",
-          "stack_name": "HDP",
-          "stack_version": "2.5"
-        },
-        "components": [
-          {
-            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
-            "StackServiceComponents": {
-              "advertise_version": "true",
-              "bulk_commands_display_name": "",
-              "bulk_commands_master_component_name": "",
-              "cardinality": "0-1",
-              "component_category": "MASTER",
-              "component_name": "HIVE_SERVER_INTERACTIVE",
-              "custom_commands": ["RESTART_LLAP"],
-              "decommission_allowed": "false",
-              "display_name": "HiveServer2 Interactive",
-              "has_bulk_commands_definition": "false",
-              "is_client": "false",
-              "is_master": "true",
-              "reassign_allowed": "false",
-              "recovery_enabled": "false",
-              "service_name": "HIVE",
-              "stack_name": "HDP",
-              "stack_version": "2.5",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-            "dependencies": []
-          },
-          {
-            "StackServiceComponents": {
-              "advertise_version": "true",
-              "cardinality": "1+",
-              "component_category": "SLAVE",
-              "component_name": "NODEMANAGER",
-              "display_name": "NodeManager",
-              "is_client": "false",
-              "is_master": "false",
-              "hostnames": [
-                "c6403.ambari.apache.org"
-              ]
-            },
-            "dependencies": []
-          },
-        ]
-      }
+                     "StackServices": {
+                       "service_name": "TEZ"
+                     }
+                   },
+                   {
+                     "StackServices": {
+                       "service_name": "SPARK"
+                     }
+                   },
+                   {
+                     "StackServices": {
+                       "service_name": "YARN",
+                     },
+                     "Versions": {
+                       "stack_version": "2.5"
+                     },
+                     "components": [
+                       {
+                         "StackServiceComponents": {
+                           "component_name": "NODEMANAGER",
+                           "hostnames": ["c6401.ambari.apache.org"]
+                         }
+                       }
+                     ]
+                   }, {
+                     "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+                     "StackServices": {
+                       "service_name": "HIVE",
+                       "service_version": "1.2.1.2.5",
+                       "stack_name": "HDP",
+                       "stack_version": "2.5"
+                     },
+                     "components": [
+                       {
+                         "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+                         "StackServiceComponents": {
+                           "advertise_version": "true",
+                           "bulk_commands_display_name": "",
+                           "bulk_commands_master_component_name": "",
+                           "cardinality": "0-1",
+                           "component_category": "MASTER",
+                           "component_name": "HIVE_SERVER_INTERACTIVE",
+                           "custom_commands": ["RESTART_LLAP"],
+                           "decommission_allowed": "false",
+                           "display_name": "HiveServer2 Interactive",
+                           "has_bulk_commands_definition": "false",
+                           "is_client": "false",
+                           "is_master": "true",
+                           "reassign_allowed": "false",
+                           "recovery_enabled": "false",
+                           "service_name": "HIVE",
+                           "stack_name": "HDP",
+                           "stack_version": "2.5",
+                           "hostnames": ["c6401.ambari.apache.org"]
+                         },
+                         "dependencies": []
+                       },
+                       {
+                         "StackServiceComponents": {
+                           "advertise_version": "true",
+                           "cardinality": "1+",
+                           "component_category": "SLAVE",
+                           "component_name": "NODEMANAGER",
+                           "display_name": "NodeManager",
+                           "is_client": "false",
+                           "is_master": "false",
+                           "hostnames": [
+                             "c6403.ambari.apache.org"
+                           ]
+                         },
+                         "dependencies": []
+                       },
+                     ]
+                   }
       ],
       "changed-configurations": [
         {
@@ -898,6 +908,12 @@ class TestHDP25StackAdvisor(TestCase):
         }
       ],
       "configurations": {
+        "cluster-env": {
+          "properties": {
+            "stack_root": "{\"HDP\":\"/usr/hdp\"}",
+            "stack_name": "HDP"
+          },
+        },
         "capacity-scheduler": {
           "properties": {
             "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
@@ -960,7 +976,8 @@ class TestHDP25StackAdvisor(TestCase):
             "tez.am.resource.memory.mb": "341"
           }
         }
-      }
+      },
+      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
     }
 
     clusterData = {
@@ -990,6 +1007,9 @@ class TestHDP25StackAdvisor(TestCase):
 
     self.assertEquals(configurations['hive-interactive-site']['properties']['hive.server2.tez.default.queues'], 'default')
     self.assertEquals(configurations['hive-interactive-site']['properties']['hive.llap.daemon.queue.name'], 'default')
+    self.assertEquals(configurations['yarn-site']['properties']['yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes'],
+                      'org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl,org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin')
+    self.assertEquals(configurations['yarn-site']['properties']['yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath'], '/usr/hdp/${hdp.version}/spark/hdpLib/*')
     self.assertTrue('hive-interactive-env' not in configurations)
     self.assertTrue('property_attributes' not in configurations)
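
As a note on the getStackRoot helper introduced above: cluster-env/stack_root now carries a JSON map from stack name to root directory, and the advisor falls back to /usr/hdp when the property or the stack name is absent. A small self-contained sketch of that resolution (the cluster-env values below are invented for the example, not taken from a real cluster):

# Sketch of the stack_root lookup added to stack_advisor.py above.
import json

def get_stack_root(cluster_env):
    stack_root = "/usr/hdp"                                  # default when nothing is configured
    if cluster_env and "stack_root" in cluster_env:
        stack_roots = json.loads(cluster_env["stack_root"])  # e.g. {"HDP": "/usr/hdp"}
        stack_name = cluster_env.get("stack_name")
        if stack_name in stack_roots:
            stack_root = stack_roots[stack_name]
    return stack_root

print(get_stack_root({"stack_root": '{"HDP": "/grid/hdp"}', "stack_name": "HDP"}))  # /grid/hdp (hypothetical)
print(get_stack_root({}))                                                           # /usr/hdp (default)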
 


[26/31] ambari git commit: AMBARI-21454. hive20 and wfmanager views fail to build due to the missing module babel-plugin-transform-es2015-block-scoping (Vijay Kumar via smohanty)

Posted by ab...@apache.org.
AMBARI-21454. hive20 and wfmanager views fail to build due to the missing module babel-plugin-transform-es2015-block-scoping (Vijay Kumar via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/eb1adcbf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/eb1adcbf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/eb1adcbf

Branch: refs/heads/branch-feature-logsearch-ui
Commit: eb1adcbff32fb9440f288ccaddc997297eb8e4fb
Parents: f27f3af
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Wed Jul 12 16:30:49 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Wed Jul 12 16:30:49 2017 -0700

----------------------------------------------------------------------
 contrib/views/hive20/src/main/resources/ui/package.json    | 1 +
 contrib/views/wfmanager/src/main/resources/ui/package.json | 1 +
 2 files changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/eb1adcbf/contrib/views/hive20/src/main/resources/ui/package.json
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/package.json b/contrib/views/hive20/src/main/resources/ui/package.json
index a409111..eea8cf9 100644
--- a/contrib/views/hive20/src/main/resources/ui/package.json
+++ b/contrib/views/hive20/src/main/resources/ui/package.json
@@ -24,6 +24,7 @@
     "bootstrap-daterangepicker": "2.1.24",
     "bower": "^1.7.9",
     "broccoli-asset-rev": "^2.4.2",
+    "babel-plugin-transform-es2015-block-scoping": "^6.24.1",
     "ember-ajax": "^2.0.1",
     "ember-cli": "2.7.0",
     "ember-cli-app-version": "^1.0.0",

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb1adcbf/contrib/views/wfmanager/src/main/resources/ui/package.json
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/package.json b/contrib/views/wfmanager/src/main/resources/ui/package.json
index 25ed6c1..69f43c8 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/package.json
+++ b/contrib/views/wfmanager/src/main/resources/ui/package.json
@@ -21,6 +21,7 @@
   "devDependencies": {
     "bower": "^1.7.7",
     "broccoli-asset-rev": "^2.2.0",
+    "babel-plugin-transform-es2015-block-scoping": "^6.24.1",
     "ember-ajax": "0.7.1",
     "ember-cli": "2.3.0",
     "ember-cli-app-version": "^1.0.0",


[14/31] ambari git commit: AMBARI-21423 Add REST end point for the documentation of the Log Feeder shipper properties (mgergely)

Posted by ab...@apache.org.
AMBARI-21423 Add REST end point for the documentation of the Log Feeder shipper properties (mgergely)

Change-Id: If6d1b66c3a1f74b118ae60a7edc26624d49fb7e6


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/15dd999f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/15dd999f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/15dd999f

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 15dd999fff99fb80bc65ddfc94513e890a6efdef
Parents: c088289
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Mon Jul 10 14:51:23 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Mon Jul 10 14:51:23 2017 +0200

----------------------------------------------------------------------
 .../api/ShipperConfigElementDescription.java    |  59 +++++++++++
 .../api/ShipperConfigTypeDescription.java       |  44 ++++++++
 .../model/inputconfig/impl/ConditionsImpl.java  |  13 +++
 .../model/inputconfig/impl/FieldsImpl.java      |  14 +++
 .../inputconfig/impl/FilterDescriptorImpl.java  |  51 ++++++++++
 .../impl/FilterGrokDescriptorImpl.java          |  24 +++++
 .../impl/FilterKeyValueDescriptorImpl.java      |  28 +++++
 .../model/inputconfig/impl/InputConfigImpl.java |  18 ++++
 .../inputconfig/impl/InputDescriptorImpl.java   | 101 +++++++++++++++++++
 .../impl/InputFileBaseDescriptorImpl.java       |  27 +++++
 .../impl/InputS3FileDescriptorImpl.java         |  16 +++
 .../impl/MapAnonymizeDescriptorImpl.java        |  21 +++-
 .../inputconfig/impl/MapDateDescriptorImpl.java |  20 +++-
 .../impl/MapFieldCopyDescriptorImpl.java        |  14 ++-
 .../impl/MapFieldDescriptorImpl.java            |  33 ++++++
 .../impl/MapFieldNameDescriptorImpl.java        |  14 ++-
 .../impl/MapFieldValueDescriptorImpl.java       |  20 +++-
 .../inputconfig/impl/PostMapValuesAdapter.java  |   2 +-
 .../ambari-logsearch-logfeeder/docs/filter.md   |   4 +-
 .../ambari-logsearch-logfeeder/docs/input.md    |  10 +-
 .../docs/postMapValues.md                       |   2 +-
 .../ambari/logfeeder/filter/FilterJSONTest.java |  12 ++-
 .../common/ShipperConfigDescriptionStorage.java |  67 ++++++++++++
 .../ambari/logsearch/doc/DocConstants.java      |   1 +
 .../ambari/logsearch/manager/InfoManager.java   |   9 ++
 .../response/ShipperConfigDescriptionData.java  |  52 ++++++++++
 .../ambari/logsearch/rest/InfoResource.java     |  10 ++
 27 files changed, 667 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigElementDescription.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigElementDescription.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigElementDescription.java
new file mode 100644
index 0000000..d65bf8e
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigElementDescription.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.config.api;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Marker for the shipper configuration properties.
+ * Can be used to generate documentation about the shipper configs.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.FIELD})
+public @interface ShipperConfigElementDescription {
+
+  /**
+   * The path of the json element.
+   */
+  String path();
+
+  /**
+   * The type of the json element.
+   */
+  String type();
+
+  /**
+   * Describe what the json element is used for.
+   */
+  String description();
+
+  /**
+   * An example value for the element, if applicable.
+   */
+  String[] examples() default {};
+
+  /**
+   * Default value of the json element, if applicable.
+   */
+  String defaultValue() default "";
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigTypeDescription.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigTypeDescription.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigTypeDescription.java
new file mode 100644
index 0000000..1c112d8
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/ShipperConfigTypeDescription.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.config.api;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Marker for the shipper configuration types.
+ * Can be used to generate documentation about the shipper configs.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.TYPE})
+public @interface ShipperConfigTypeDescription {
+
+  /**
+   * The name of the element type.
+   */
+  String name();
+
+  /**
+   * The description of the json element.
+   */
+  String description();
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
index 8bbff8f..2ba472c 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
@@ -19,11 +19,24 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.Conditions;
 
 import com.google.gson.annotations.Expose;
 
+@ShipperConfigTypeDescription(
+  name = "Conditions",
+  description = "Describes the conditions that should be met in order to match a filter to an input element.\n" +
+                "\n" +
+                "It has the following attributes:"
+)
 public class ConditionsImpl implements Conditions {
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/conditions/fields",
+    type = "json object",
+    description = "The fields in the input element of which's value should be met."
+  )
   @Expose
   private FieldsImpl fields;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
index 68cd0e2..32a0348 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
@@ -21,11 +21,25 @@ package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
 import java.util.Set;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.Fields;
 
 import com.google.gson.annotations.Expose;
 
+@ShipperConfigTypeDescription(
+    name = "Fields",
+    description = "Describes a the fields which's value should be met in order to match a filter to an input element.\n" +
+                  "\n" +
+                  "It has the following attributes:"
+  )
 public class FieldsImpl implements Fields {
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/conditions/fields/type",
+    type = "list of strings",
+    description = "The acceptable values for the type field in the input element.",
+    examples = {"ambari_server", "\"spark_jobhistory_server\", \"spark_thriftserver\", \"livy_server\""}
+  )
   @Expose
   private Set<String> type;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
index 4e11715..eb9d38c 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
@@ -22,35 +22,86 @@ package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
+@ShipperConfigTypeDescription(
+  name = "Filter",
+  description = "The filter element in the [input configuration](inputConfig.md) contains a list of filter descriptions, each describing one filter applied on an input.\n" +
+                "\n" +
+                "The general elements in the json are the following:"
+)
 public abstract class FilterDescriptorImpl implements FilterDescriptor {
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/filter",
+    type = "string",
+    description = "The type of the filter.",
+    examples = {"grok", "keyvalue", "json"}
+  )
   @Expose
   private String filter;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/conditions",
+    type = "json object",
+    description = "The conditions of which input to filter."
+  )
   @Expose
   private ConditionsImpl conditions;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/sort_order",
+    type = "integer",
+    description = "Describes the order in which the filters should be applied.",
+    examples = {"1", "3"}
+  )
   @Expose
   @SerializedName("sort_order")
   private Integer sortOrder;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/source_field",
+    type = "integer",
+    description = "The source of the filter, must be set for keyvalue filters.",
+    examples = {"field_further_to_filter"},
+    defaultValue = "log_message"
+  )
   @Expose
   @SerializedName("source_field")
   private String sourceField;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/remove_source_field",
+    type = "boolean",
+    description = "Remove the source field after the filter is applied.",
+    examples = {"true", "false"},
+    defaultValue = "false"
+  )
   @Expose
   @SerializedName("remove_source_field")
   private Boolean removeSourceField;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values",
+    type = "dictionary string to list of json objects",
+    description = "Mappings done after the filtering provided it's result."
+  )
   @Expose
   @SerializedName("post_map_values")
   private Map<String, List<PostMapValuesImpl>> postMapValues;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/is_enabled",
+    type = "boolean",
+    description = "A flag to show if the filter should be used.",
+    examples = {"true", "false"},
+    defaultValue = "true"
+  )
   @Expose
   @SerializedName("is_enabled")
   private Boolean isEnabled;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
index 995f76b..e140df0 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
@@ -19,20 +19,44 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterGrokDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
+@ShipperConfigTypeDescription(
+  name = "Grok Filter",
+  description = "Grok filters have the following additional parameters:"
+)
 public class FilterGrokDescriptorImpl extends FilterDescriptorImpl implements FilterGrokDescriptor {
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/log4j_format",
+    type = "string",
+    description = "The log4j pattern of the log, not used, it is only there for documentation.",
+    examples = {"%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n"}
+  )
   @Expose
   @SerializedName("log4j_format")
   private String log4jFormat;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/multiline_pattern",
+    type = "string",
+    description = "The grok pattern that shows that the line is not a log line on it's own but the part of a multi line entry.",
+    examples = {"^(%{TIMESTAMP_ISO8601:logtime})"}
+  )
   @Expose
   @SerializedName("multiline_pattern")
   private String multilinePattern;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/message_pattern",
+    type = "string",
+    description = "The grok pattern to use to parse the log entry.",
+    examples = {"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}-%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\@%{INT:line_number}\\]%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}"}
+  )
   @Expose
   @SerializedName("message_pattern")
   private String messagePattern;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
index 8e89990..1c782c5 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
@@ -19,20 +19,48 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterKeyValueDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
+@ShipperConfigTypeDescription(
+    name = "Key-value Filter",
+    description = "value_borders is only used if it is specified, and value_split is not.\n" +
+                  "\n" +
+                  "Key-value filters have the following additional parameters:"
+)
 public class FilterKeyValueDescriptorImpl extends FilterDescriptorImpl implements FilterKeyValueDescriptor {
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/field_split",
+    type = "string",
+    description = "The string that splits the key-value pairs.",
+    examples = {" ", ","},
+    defaultValue = "\\t"
+  )
   @Expose
   @SerializedName("field_split")
   private String fieldSplit;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/value_split",
+    type = "string",
+    description = "The string that separates keys from values.",
+    examples = {":", "->"},
+    defaultValue = "="
+  )
   @Expose
   @SerializedName("value_split")
   private String valueSplit;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/value_borders",
+    type = "string",
+    description = "The borders around the value, must be 2 characters long, first before it, second after it.",
+    examples = {"()", "[]", "{}"}
+  )
   @Expose
   @SerializedName("value_borders")
   private String valueBorders;
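
As a rough illustration of what the field_split and value_split parameters described above control, here is a small sketch (not the Log Feeder implementation; the sample log line is invented):

# Illustrative only: splits a message into key-value pairs the way the
# field_split / value_split settings above describe. Not Log Feeder code.
def split_key_values(message, field_split="\t", value_split="="):
    pairs = {}
    for chunk in message.split(field_split):
        if value_split in chunk:
            key, value = chunk.split(value_split, 1)
            pairs[key.strip()] = value.strip()
    return pairs

sample = "user=admin,operation=login,result=success"       # made-up audit-style line
print(split_key_values(sample, field_split=",", value_split="="))
# {'user': 'admin', 'operation': 'login', 'result': 'success'}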

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
index a4eba8e..6ce634f 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
@@ -21,16 +21,34 @@ package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
 import java.util.List;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
 
 import com.google.gson.annotations.Expose;
 
+@ShipperConfigTypeDescription(
+  name = "Input Config",
+  description = "The input configurations are stored in json files. Each of them are describing the processing of the log files of a service.\n" +
+                "\n" +
+                "The json contains two elements:"
+)
 public class InputConfigImpl implements InputConfig {
+  @ShipperConfigElementDescription(
+    path = "/input",
+    type = "list of json objects",
+    description = "A list of input descriptions"
+  )
   @Expose
   private List<InputDescriptorImpl> input;
 
+  @ShipperConfigElementDescription(
+    path = "/filter",
+    type = "list of json objects",
+    description = "A list of filter descriptions"
+  )
   @Expose
   private List<FilterDescriptorImpl> filter;
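
Putting the documented paths together, a minimal input configuration pairs one entry under /input with one entry under /filter. The sketch below assembles such a document as a Python dict and prints it as JSON; the concrete values are illustrative, echoing the examples given in this commit's descriptors rather than a shipped configuration:

# Hypothetical minimal shipper input configuration built from the element
# paths documented above (/input/[]/type, /input/[]/rowtype, /input/[]/path,
# /filter/[]/filter, /filter/[]/conditions/fields/type, /filter/[]/message_pattern).
import json

input_config = {
    "input": [
        {
            "type": "zookeeper",
            "rowtype": "service",
            "path": "/var/log/zookeeper/zookeeper*.log"
        }
    ],
    "filter": [
        {
            "filter": "grok",
            "conditions": {"fields": {"type": ["zookeeper"]}},
            "message_pattern": "(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}-%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}"
        }
    ]
}

print(json.dumps(input_config, indent=2))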
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
index 54b4b9b..cec16c8 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
@@ -21,59 +21,160 @@ package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
+@ShipperConfigTypeDescription(
+  name = "Input",
+  description = "The input element in the input configuration contains a list of input descriptions, each describing one source of input.\n" +
+                "\n" +
+                "The general elements in the json are the following:"
+)
 public abstract class InputDescriptorImpl implements InputDescriptor {
+  @ShipperConfigElementDescription(
+    path = "/input/[]/type",
+    type = "string",
+    description = "The log id for this source.",
+    examples = {"zookeeper", "ambari_server"}
+  )
   @Expose
   private String type;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/rowtype",
+    type = "string",
+    description = "The type of the row.",
+    examples = {"service", "audit"}
+  )
   @Expose
   private String rowtype;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/path",
+    type = "string",
+    description = "The path of the source, may contain '*' characters too.",
+    examples = {"/var/log/ambari-logsearch-logfeeder/logsearch-logfeeder.json", "/var/log/zookeeper/zookeeper*.log"}
+  )
   @Expose
   private String path;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/add_fields",
+    type = "dictionary",
+    description = "The element contains field_name: field_value pairs which will be added to each rows data.",
+    examples = {"\"cluster\":\"cluster_name\""}
+  )
   @Expose
   @SerializedName("add_fields")
   private Map<String, String> addFields;
   
+  @ShipperConfigElementDescription(
+    path = "/input/[]/source",
+    type = "dictionary",
+    description = "The type of the input source.",
+    examples = {"file", "s3_file"}
+  )
   @Expose
   private String source;
   
+  @ShipperConfigElementDescription(
+    path = "/input/[]/tail",
+    type = "boolean",
+    description = "The input should check for only the latest file matching the pattern, not all of them.",
+    examples = {"true", "false"},
+    defaultValue = "true"
+  )
   @Expose
   private Boolean tail;
   
+  @ShipperConfigElementDescription(
+    path = "/input/[]/gen_event_md5",
+    type = "boolean",
+    description = "Generate an event_md5 field for each row by creating a hash of the row data.",
+    examples = {"true", "false"},
+    defaultValue = "true"
+  )
   @Expose
   @SerializedName("gen_event_md5")
   private Boolean genEventMd5;
   
+  @ShipperConfigElementDescription(
+    path = "/input/[]/use_event_md5_as_id",
+    type = "boolean",
+    description = "Generate an id for each row by creating a hash of the row data.",
+    examples = {"true", "false"},
+    defaultValue = "false"
+  )
   @Expose
   @SerializedName("use_event_md5_as_id")
   private Boolean useEventMd5AsId;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/cache_enabled",
+    type = "boolean",
+    description = "Allows the input to use a cache to filter out duplications.",
+    examples = {"true", "false"},
+    defaultValue = "false"
+  )
   @Expose
   @SerializedName("cache_enabled")
   private Boolean cacheEnabled;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/cache_key_field",
+    type = "string",
+    description = "Specifies the field for which to use the cache to find duplications of.",
+    examples = {"some_field_prone_to_repeating_value"},
+    defaultValue = "log_message"
+  )
   @Expose
   @SerializedName("cache_key_field")
   private String cacheKeyField;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/cache_last_dedup_enabled",
+    type = "boolean",
+    description = "Allow to filter out entries which are same as the most recent one irrelevant of it's time.",
+    examples = {"true", "false"},
+    defaultValue = "false"
+  )
   @Expose
   @SerializedName("cache_last_dedup_enabled")
   private Boolean cacheLastDedupEnabled;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/cache_size",
+    type = "integer",
+    description = "The number of entries to store in the cache.",
+    examples = {"50"},
+    defaultValue = "100"
+  )
   @Expose
   @SerializedName("cache_size")
   private Integer cacheSize;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/cache_dedup_interval",
+    type = "integer",
+    description = "The maximum interval in ms which may pass between two identical log messages to filter the latter out.",
+    examples = {"500"},
+    defaultValue = "1000"
+  )
   @Expose
   @SerializedName("cache_dedup_interval")
   private Long cacheDedupInterval;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/is_enabled",
+    type = "boolean",
+    description = "A flag to show if the input should be used.",
+    examples = {"true", "false"},
+    defaultValue = "true"
+  )
   @Expose
   @SerializedName("is_enabled")
   private Boolean isEnabled;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
index 51c7ec8..8281daa 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
@@ -19,20 +19,47 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileBaseDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
+@ShipperConfigTypeDescription(
+  name = "File Input",
+  description = "File inputs have some additional parameters:"
+)
 public class InputFileBaseDescriptorImpl extends InputDescriptorImpl implements InputFileBaseDescriptor {
+  @ShipperConfigElementDescription(
+    path = "/input/[]/checkpoint_interval_ms",
+    type = "integer",
+    description = "The time interval in ms when the checkpoint file should be updated.",
+    examples = {"10000"},
+    defaultValue = "5000"
+  )
   @Expose
   @SerializedName("checkpoint_interval_ms")
   private Integer checkpointIntervalMs;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/process_file",
+    type = "boolean",
+    description = "Should the file be processed.",
+    examples = {"true", "false"},
+    defaultValue = "true"
+  )
   @Expose
   @SerializedName("process_file")
   private Boolean processFile;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/copy_file",
+    type = "boolean",
+    description = "Should the file be copied (only if not processed).",
+    examples = {"true", "false"},
+    defaultValue = "false"
+  )
   @Expose
   @SerializedName("copy_file")
   private Boolean copyFile;

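As a rough illustration of the file-specific parameters annotated above (checkpoint_interval_ms, process_file, copy_file), a file input entry could look like the sketch below; the service name and path are hypothetical.

    {
      "input": [
        {
          "type": "hypothetical_service",
          "rowtype": "service",
          "source": "file",
          "path": "/var/log/hypothetical-service/*.log",
          "tail": true,
          "checkpoint_interval_ms": 5000,
          "process_file": true,
          "copy_file": false
        }
      ]
    }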
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
index 277a57c..19f52d3 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
@@ -19,16 +19,32 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.InputS3FileDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
+@ShipperConfigTypeDescription(
+  name = "S3 File Input",
+  description = "S3 file inputs have the following parameters in addition to the general file parameters:"
+)
 public class InputS3FileDescriptorImpl extends InputFileBaseDescriptorImpl implements InputS3FileDescriptor {
+  @ShipperConfigElementDescription(
+    path = "/input/[]/s3_access_key",
+    type = "string",
+    description = "The access key used for AWS credentials."
+  )
   @Expose
   @SerializedName("s3_access_key")
   private String s3AccessKey;
 
+  @ShipperConfigElementDescription(
+    path = "/input/[]/s3_secret_key",
+    type = "string",
+    description = "The secret key used for AWS credentials."
+  )
   @Expose
   @SerializedName("s3_secret_key")
   private String s3SecretKey;

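A hypothetical s3_file input combining the general parameters with the two S3 credential fields above might look like this sketch; the bucket path and keys are placeholders.

    {
      "input": [
        {
          "type": "hypothetical_service",
          "rowtype": "service",
          "source": "s3_file",
          "path": "hypothetical-bucket/logs/hypothetical-service.log",
          "s3_access_key": "HYPOTHETICAL_ACCESS_KEY",
          "s3_secret_key": "HYPOTHETICAL_SECRET_KEY"
        }
      ]
    }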
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java
index 5fdbbab..8c128de 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapAnonymizeDescriptorImpl.java
@@ -19,20 +19,39 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.MapAnonymizeDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
-public class MapAnonymizeDescriptorImpl implements MapAnonymizeDescriptor {
+@ShipperConfigTypeDescription(
+    name = "Map Anonymize",
+    description = "The name of the mapping element should be map_anonymize. The value json element should contain the following parameter:"
+)
+public class MapAnonymizeDescriptorImpl extends MapFieldDescriptorImpl implements MapAnonymizeDescriptor {
   @Override
   public String getJsonName() {
     return "map_anonymize";
   }
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_anonymize/pattern",
+    type = "string",
+    description = "The pattern to use to identify parts to anonymize. The parts to hide should be marked with the \"<hide>\" string.",
+    examples = {"Some secret is here: <hide>, and another one is here: <hide>"}
+  )
   @Expose
   private String pattern;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_anonymize/hide_char",
+    type = "string",
+    description = "The character to hide with",
+    defaultValue = "*",
+    examples = {"X", "-"}
+  )
   @Expose
   @SerializedName("hide_char")
   private Character hideChar;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
index 2e54e7a..feec4b6 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
@@ -19,21 +19,39 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.MapDateDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
-public class MapDateDescriptorImpl implements MapDateDescriptor {
+@ShipperConfigTypeDescription(
+    name = "Map Date",
+    description = "The name of the mapping element should be map_date. The value json element may contain the following parameters:"
+)
+public class MapDateDescriptorImpl extends MapFieldDescriptorImpl implements MapDateDescriptor {
   @Override
   public String getJsonName() {
     return "map_date";
   }
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_date/src_date_pattern",
+    type = "string",
+    description = "If it is specified than the mapper converts from this format to the target, and also adds missing year",
+    examples = {"MMM dd HH:mm:ss"}
+  )
   @Expose
   @SerializedName("src_date_pattern")
   private String sourceDatePattern;
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_date/target_date_pattern",
+    type = "string",
+    description = "If 'epoch' then the field is parsed as seconds from 1970, otherwise the content used as pattern",
+    examples = {"yyyy-MM-dd HH:mm:ss,SSS", "epoch"}
+  )
   @Expose
   @SerializedName("target_date_pattern")
   private String targetDatePattern;

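Tying the two parameters above together, a map_date entry in post_map_values could look like the following sketch; the field name logtime is an assumption and the patterns are taken from the examples above.

    {
      "post_map_values": {
        "logtime": [
          {
            "map_date": {
              "src_date_pattern": "MMM dd HH:mm:ss",
              "target_date_pattern": "yyyy-MM-dd HH:mm:ss,SSS"
            }
          }
        ]
      }
    }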
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
index 4a8d746..e7b8fdf 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
@@ -19,17 +19,29 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldCopyDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
-public class MapFieldCopyDescriptorImpl implements MapFieldCopyDescriptor {
+@ShipperConfigTypeDescription(
+    name = "Map Copy",
+    description = "The name of the mapping element should be map_copy. The value json element should contain the following parameter:"
+)
+public class MapFieldCopyDescriptorImpl extends MapFieldDescriptorImpl implements MapFieldCopyDescriptor {
   @Override
   public String getJsonName() {
     return "map_fieldcopy";
   }
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_copy/copy_name",
+    type = "string",
+    description = "The name of the copied field",
+    examples = {"new_name"}
+  )
   @Expose
   @SerializedName("copy_name")
   private String copyName;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldDescriptorImpl.java
new file mode 100644
index 0000000..101e0d4
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldDescriptorImpl.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+
+@ShipperConfigTypeDescription(
+    name = "Post Map Values",
+    description = "The Post Map Values element in the [filter](filter.md) field names as keys, the values are lists of sets of " +
+                  "post map values, each describing one mapping done on a field named before obtained after filtering.\n" +
+                  "\n" +
+                  "Currently there are four kind of mappings are supported:"
+  )
+public abstract class MapFieldDescriptorImpl implements MapFieldDescriptor {
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
index bd32018..e1b71e6 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
@@ -19,17 +19,29 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldNameDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
-public class MapFieldNameDescriptorImpl implements MapFieldNameDescriptor {
+@ShipperConfigTypeDescription(
+    name = "Map Field Name",
+    description = "The name of the mapping element should be map_fieldname. The value json element should contain the following parameter:"
+)
+public class MapFieldNameDescriptorImpl extends MapFieldDescriptorImpl implements MapFieldNameDescriptor {
   @Override
   public String getJsonName() {
     return "map_fieldname";
   }
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_fieldname/new_field_name",
+    type = "string",
+    description = "The name of the renamed field",
+    examples = {"new_name"}
+  )
   @Expose
   @SerializedName("new_field_name")
   private String newFieldName;

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
index 599e152..a80a994 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
@@ -19,21 +19,39 @@
 
 package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
 
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.config.api.ShipperConfigTypeDescription;
 import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldValueDescriptor;
 
 import com.google.gson.annotations.Expose;
 import com.google.gson.annotations.SerializedName;
 
-public class MapFieldValueDescriptorImpl implements MapFieldValueDescriptor {
+@ShipperConfigTypeDescription(
+    name = "Map Field Value",
+    description = "The name of the mapping element should be map_fieldvalue. The value json element should contain the following parameter:"
+)
+public class MapFieldValueDescriptorImpl extends MapFieldDescriptorImpl implements MapFieldValueDescriptor {
   @Override
   public String getJsonName() {
     return "map_fieldvalue";
   }
 
+  @ShipperConfigElementDescription(
+    path = "/filter/[]/post_map_values/{field_name}/[]/map_fieldvalue/pre_value",
+    type = "string",
+    description = "The value that the field must match (ignoring case) to be mapped",
+    examples = {"old_value"}
+  )
   @Expose
   @SerializedName("pre_value")
   private String preValue;
 
+  @ShipperConfigElementDescription(
+      path = "/filter/[]/post_map_values/{field_name}/[]/map_fieldvalue/post_value",
+      type = "string",
+      description = "The value to which the field is modified to",
+      examples = {"new_value"}
+    )
   @Expose
   @SerializedName("post_value")
   private String postValue;

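For map_fieldvalue, a post_map_values entry might look like the sketch below; the field name and values are hypothetical and only show how pre_value and post_value pair up.

    {
      "post_map_values": {
        "level": [
          {
            "map_fieldvalue": {
              "pre_value": "WARNING",
              "post_value": "WARN"
            }
          }
        ]
      }
    }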
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
index 3c21fd8..e3f9886 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
@@ -95,7 +95,7 @@ public class PostMapValuesAdapter implements JsonDeserializer<List<PostMapValues
   private JsonElement createMapperObject(PostMapValuesImpl postMapValues, JsonSerializationContext context) {
     JsonObject jsonObject = new JsonObject();
     for (MapFieldDescriptor m : postMapValues.getMappers()) {
-      jsonObject.add(((MapFieldDescriptor)m).getJsonName(), context.serialize(m));
+      jsonObject.add(((MapFieldDescriptorImpl)m).getJsonName(), context.serialize(m));
     }
     return jsonObject;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md b/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md
index 129279b..d825290 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/docs/filter.md
@@ -48,6 +48,8 @@ Grok filters have the following additional parameters:
 
 ## Key-value Filter
 
+value\_borders is only used if it is specified, and value\_split is not.
+
 Key-value filters have the following additional parameters:
 
 | Field          | Description                                                                               | Default |
@@ -56,4 +58,4 @@ Key-value filters have the following additional parameters:
 | value\_split   | The string that separates keys from values                                                | "="     |
 | value\_borders | The borders around the value, must be 2 characters long, first before it, second after it | -       |
 
-If value\_borders is only used if it is specified, and value\_split is not.
+

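To make the interplay of these parameters concrete, a key-value filter entry could look like the sketch below; the filter name "keyvalue", the conditions block and source_field follow the general filter layout described earlier and are meant as an illustration only.

    {
      "filter": [
        {
          "filter": "keyvalue",
          "conditions": { "fields": { "type": ["hypothetical_audit_source"] } },
          "source_field": "log_message",
          "field_split": ",",
          "value_split": "="
        }
      ]
    }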
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md b/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md
index 661eeb8..1a9ce8d 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/docs/input.md
@@ -20,20 +20,18 @@ limitations under the License.
 # Input
 
 The input element in the [input configuration](inputConfig.md) contains a list of input descriptions, each describing one source
-of input.
-
-The general elements in the json are the following:
+of input. The general elements in the json are the following:
 
 | Field                       | Description                                                                                           | Default      |
 |-----------------------------|-------------------------------------------------------------------------------------------------------|--------------|
-| type                        | The type of the input source, currently file and s3_file are supported                                | -            |
+| type                        | The log id for this source                                                                            | -            |
 | rowtype                     | The type of the row, can be service / audit                                                           | -            |
 | path                        | The path of the source, may contain '*' characters too                                                | -            |
 | add\_fields                 | The element contains field\_name: field\_value pairs which will be added to each rows data            | -            |
+| source                      | The type of the input source, currently file and s3_file are supported                                | -            |
 | tail                        | The input should check for only the latest file matching the pattern, not all of them                 | true         |
 | gen\_event\_md5             | Generate an event\_md5 field for each row by creating a hash of the row data                          | true         |
 | use\_event\_md5\_as\_id     | Generate an id for each row by creating a hash of the row data                                        | false        |
-| start\_position             | Should the parsing start from the beginning                                                           | beginning    |
 | cache\_enabled              | Allows the input to use a cache to filter out duplications                                            | true         |
 | cache\_key\_field           | Specifies the field for which to use the cache to find duplications of                                | log\_message |
 | cache\_last\_dedup\_enabled | Allow to filter out entries which are same as the most recent one irrelevant of it's time             | false        |
@@ -44,7 +42,7 @@ The general elements in the json are the following:
 
 ## File Input
 
-File inputs have the following parameters too:
+File inputs have some additional parameters:
 
 | Field                    | Description                                                        | Default |
 |--------------------------|--------------------------------------------------------------------|---------|

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md b/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md
index 7ec439a..bc219df 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/docs/postMapValues.md
@@ -67,4 +67,4 @@ The name of the mapping element should be map\_anonymize. The value json element
 | Field      | Description                                                                                                     |
 |------------|-----------------------------------------------------------------------------------------------------------------|
 | pattern    | The pattern to use to identify parts to anonymize. The parts to hide should be marked with the "<hide>" string. |
-| hide\_char | The character to hide with, if it is not specified then the default is 'X'                                      |
+| hide\_char | The character to hide with, if it is not specified then the default is '*'                                      |

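As an illustration, a map_anonymize entry relying on the default hide_char could look like the sketch below; the field name and pattern are hypothetical.

    {
      "post_map_values": {
        "log_message": [
          {
            "map_anonymize": {
              "pattern": "password=<hide>",
              "hide_char": "*"
            }
          }
        ]
      }
    }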
http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
index 7abf177..acc3d4d 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
@@ -131,13 +131,15 @@ public class FilterJSONTest {
   @Test
   public void testJSONFilterCode_invalidJson() throws Exception {
     LOG.info("testJSONFilterCode_invalidJson()");
+    
     init(new FilterJsonDescriptorImpl());
-    String inputStr="invalid json";
+    
+    String inputStr = "invalid json";
     try{
-    filterJson.apply(inputStr,new InputMarker(null, null, 0));
-    fail("Expected LogFeederException was not occured");
-    }catch(LogFeederException logFeederException){
-      assertEquals("Json parsing failed for inputstr = "+inputStr, logFeederException.getLocalizedMessage());
+      filterJson.apply(inputStr,new InputMarker(null, null, 0));
+      fail("Expected LogFeederException was not occured");
+    } catch(LogFeederException logFeederException) {
+      assertEquals("Json parsing failed for inputstr = " + inputStr, logFeederException.getLocalizedMessage());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/ShipperConfigDescriptionStorage.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/ShipperConfigDescriptionStorage.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/ShipperConfigDescriptionStorage.java
new file mode 100644
index 0000000..7d4bc2c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/ShipperConfigDescriptionStorage.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.common;
+
+import org.apache.ambari.logsearch.config.api.ShipperConfigElementDescription;
+import org.apache.ambari.logsearch.model.response.ShipperConfigDescriptionData;
+import org.reflections.Reflections;
+import org.reflections.scanners.FieldAnnotationsScanner;
+
+import javax.annotation.PostConstruct;
+import javax.inject.Named;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+@Named
+public class ShipperConfigDescriptionStorage {
+
+  private static final String SHIPPER_CONFIG_PACKAGE = "org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl";
+  
+  private final List<ShipperConfigDescriptionData> shipperConfigDescription = new ArrayList<>();
+
+  @PostConstruct
+  public void postConstruct() {
+    Thread loadShipperConfigDescriptionThread = new Thread("load_shipper_config_description") {
+      @Override
+      public void run() {
+        fillShipperConfigDescriptions();
+      }
+    };
+    loadShipperConfigDescriptionThread.setDaemon(true);
+    loadShipperConfigDescriptionThread.start();
+  }
+
+  public List<ShipperConfigDescriptionData> getShipperConfigDescription() {
+    return shipperConfigDescription;
+  }
+
+  private void fillShipperConfigDescriptions() {
+    Reflections reflections = new Reflections(SHIPPER_CONFIG_PACKAGE, new FieldAnnotationsScanner());
+    Set<Field> fields = reflections.getFieldsAnnotatedWith(ShipperConfigElementDescription.class);
+    for (Field field : fields) {
+      ShipperConfigElementDescription description = field.getAnnotation(ShipperConfigElementDescription.class);
+      shipperConfigDescription.add(new ShipperConfigDescriptionData(description.path(), description.description(),
+          description.examples(), description.defaultValue()));
+    }
+    
+    shipperConfigDescription.sort((o1, o2) -> o1.getPath().compareTo(o2.getPath()));
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
index 6d1382d..da0a8bb 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
@@ -104,6 +104,7 @@ public class DocConstants {
     public static final String GET_AUTH_DETAILS_OD = "Get authentication details.";
     public static final String GET_ALL_PROPERTIES_INFO_OD = "List all available properties for Log Search and Log Feeder";
     public static final String GET_LOGSEARCH_PROPERTIES_INFO_OD = "List all available properties for Log Search property file (e.g: logsearch.properties/logfeeder.properties)";
+    public static final String GET_ALL_SHIPPER_CONFIG_INFO_OD = "List all available shipper configuration elements";
   }
 
   public class EventHistoryDescriptions {

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java
index f6d0449..2f63492 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/InfoManager.java
@@ -25,7 +25,9 @@ import java.util.Map;
 
 import org.apache.ambari.logsearch.conf.AuthPropsConfig;
 import org.apache.ambari.logsearch.common.PropertyDescriptionStorage;
+import org.apache.ambari.logsearch.common.ShipperConfigDescriptionStorage;
 import org.apache.ambari.logsearch.model.response.PropertyDescriptionData;
+import org.apache.ambari.logsearch.model.response.ShipperConfigDescriptionData;
 
 import javax.inject.Inject;
 import javax.inject.Named;
@@ -39,6 +41,9 @@ public class InfoManager extends JsonManagerBase {
   @Inject
   private PropertyDescriptionStorage propertyDescriptionStore;
 
+  @Inject
+  private ShipperConfigDescriptionStorage shipperConfigDescriptionStore;
+
   public Map<String, Boolean> getAuthMap() {
     Map<String, Boolean> authMap = new HashMap<>();
     authMap.put("external", authPropsConfig.isAuthExternalEnabled());
@@ -56,4 +61,8 @@ public class InfoManager extends JsonManagerBase {
   public List<PropertyDescriptionData> getLogSearchPropertyDescriptions(String propertiesFile) {
     return getPropertyDescriptions().get(propertiesFile);
   }
+  
+  public List<ShipperConfigDescriptionData> getLogSearchShipperConfigDescription() {
+    return shipperConfigDescriptionStore.getShipperConfigDescription();
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/response/ShipperConfigDescriptionData.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/response/ShipperConfigDescriptionData.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/response/ShipperConfigDescriptionData.java
new file mode 100644
index 0000000..91f7420
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/response/ShipperConfigDescriptionData.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.model.response;
+
+public class ShipperConfigDescriptionData {
+  private final String path;
+
+  private final String description;
+
+  private final String[] examples;
+
+  private final String defaultValue;
+
+  public ShipperConfigDescriptionData(String path, String description, String[] examples, String defaultValue) {
+    this.path = path;
+    this.description = description;
+    this.examples = examples;
+    this.defaultValue = defaultValue;
+  }
+
+  public String getPath() {
+    return path;
+  }
+
+  public String getDescription() {
+    return description;
+  }
+
+  public String[] getExamples() {
+    return examples;
+  }
+
+  public String getDefaultValue() {
+    return defaultValue;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/15dd999f/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java
index 6ea0bab..e49be90 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/InfoResource.java
@@ -29,12 +29,14 @@ import io.swagger.annotations.Api;
 import io.swagger.annotations.ApiOperation;
 import org.apache.ambari.logsearch.manager.InfoManager;
 import org.apache.ambari.logsearch.model.response.PropertyDescriptionData;
+import org.apache.ambari.logsearch.model.response.ShipperConfigDescriptionData;
 import org.springframework.context.annotation.Scope;
 
 import java.util.List;
 import java.util.Map;
 
 import static org.apache.ambari.logsearch.doc.DocConstants.PublicOperationDescriptions.GET_ALL_PROPERTIES_INFO_OD;
+import static org.apache.ambari.logsearch.doc.DocConstants.PublicOperationDescriptions.GET_ALL_SHIPPER_CONFIG_INFO_OD;
 import static org.apache.ambari.logsearch.doc.DocConstants.PublicOperationDescriptions.GET_LOGSEARCH_PROPERTIES_INFO_OD;
 import static org.apache.ambari.logsearch.doc.DocConstants.PublicOperationDescriptions.GET_AUTH_DETAILS_OD;
 
@@ -70,4 +72,12 @@ public class InfoResource {
   public List<PropertyDescriptionData> getPropertyFileDescription(@PathParam("propertyFile") String propertyFile) {
     return infoManager.getLogSearchPropertyDescriptions(propertyFile);
   }
+
+  @GET
+  @Path("/shipperconfig")
+  @Produces({"application/json"})
+  @ApiOperation(GET_ALL_SHIPPER_CONFIG_INFO_OD)
+  public List<ShipperConfigDescriptionData> getShipperConfigDescription() {
+    return infoManager.getLogSearchShipperConfigDescription();
+  }
 }

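For reference, the new endpoint returns the collected ShipperConfigDescriptionData entries as json; a response could look roughly like the sketch below. The exact URL prefix depends on the class-level @Path of InfoResource (not shown in this hunk), and the property names assume default bean-style serialization of the getters.

    [
      {
        "path": "/filter/[]/field_split",
        "description": "The string that splits the key-value pairs.",
        "examples": [" ", ","],
        "defaultValue": "\\t"
      }
    ]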

[29/31] ambari git commit: AMBARI-21476 Log Search UI: implement pagination for logs list. (ababiichuk)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/mock-data.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/mock-data.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/mock-data.ts
new file mode 100644
index 0000000..3d04122
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/mock-data.ts
@@ -0,0 +1,906 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as moment from 'moment-timezone';
+
+export const mockData = {
+  login: {},
+  api: {
+    v1: {
+      audit: {
+        logs: {
+          logList: [
+            {
+              policy: 'policy',
+              reason: 'Authentication required',
+              result: 0,
+              text: 'Please log in',
+              tags: [
+                'ambari_agent'
+              ],
+              resource: '/ambari-agent',
+              sess: '0',
+              access: '0',
+              logType: 'AmbariAudit',
+              tags_str: 'ambari_agent',
+              resType: 'agent',
+              reqUser: 'admin',
+              reqData: 'data',
+              repoType: 1,
+              repo: 'ambari',
+              proxyUsers: [
+                'admin'
+              ],
+              evtTime: '2017-05-29T11:30:22.531Z',
+              enforcer: 'ambari-acl',
+              reqContext: 'ambari',
+              cliType: 'GET',
+              cliIP: '192.168.0.1',
+              agent: 'agent',
+              agentHost: 'localhost',
+              action: 'SERVICE_CHECK',
+              type: 'ambari-audit',
+              _version_: 2,
+              id: 'id0',
+              file: 'ambari-agent.log',
+              seq_num: 3,
+              bundle_id: 'b0',
+              case_id: 'c0',
+              log_message: 'User(admin), Operation(SERVICE_CHECK)',
+              logfile_line_number: 4,
+              message_md5: '12345678900987654321',
+              cluster: 'cl0',
+              event_count: 0,
+              event_md5: '09876543211234567890',
+              event_dur_ms: 100,
+              _ttl_: "+7DAYS",
+              _expire_at_: '2017-05-29T11:30:22.531Z',
+              _router_field_: 5
+            },
+            {
+              policy: 'policy',
+              reason: 'Server error',
+              result: 1,
+              text: 'Something went wrong',
+              tags: [
+                'ambari_agent'
+              ],
+              resource: '/ambari-agent',
+              sess: '1',
+              access: '1',
+              logType: 'AmbariAudit',
+              tags_str: 'ambari_server',
+              resType: 'server',
+              reqUser: 'user',
+              reqData: 'data',
+              repoType: 1,
+              repo: 'ambari',
+              proxyUsers: [
+                'user'
+              ],
+              evtTime: '2017-05-29T11:30:22.531Z',
+              enforcer: 'hdfs',
+              reqContext: 'ambari_server',
+              cliType: 'PUT',
+              cliIP: '192.168.0.1',
+              agent: 'agent',
+              agentHost: 'localhost',
+              action: 'SERVICE_CHECK',
+              type: 'ambari-audit',
+              _version_: 4,
+              id: 'id1',
+              file: 'ambari-agent.log',
+              seq_num: 5,
+              bundle_id: 'b1',
+              case_id: 'c1',
+              log_message: 'User(user), Operation(SERVICE_CHECK)',
+              logfile_line_number: 6,
+              message_md5: '10293847561029384756',
+              cluster: 'cl1',
+              event_count: 2,
+              event_md5: '01928374650192837465',
+              event_dur_ms: 500,
+              _ttl_: "+7DAYS",
+              _expire_at_: '2017-05-29T11:30:22.531Z',
+              _router_field_: 10
+            }
+          ],
+          bargraph: {
+            graphData: [
+              {
+                dataCount: [
+                  {
+                    name: 'n0',
+                    value: 1
+                  },
+                  {
+                    name: 'n1',
+                    value: 2
+                  }
+                ],
+                name: 'graph0'
+              },
+              {
+                dataCount: [
+                  {
+                    name: 'n2',
+                    value: 10
+                  },
+                  {
+                    name: 'n3',
+                    value: 20
+                  }
+                ],
+                name: 'graph1'
+              }
+            ]
+          },
+          components: {},
+          resources: {
+            graphData: [
+              {
+                dataCount: [
+                  {
+                    name: 'n16',
+                    value: 800
+                  },
+                  {
+                    name: 'n17',
+                    value: 400
+                  }
+                ],
+                name: 'graph8'
+              },
+              {
+                dataCount: [
+                  {
+                    name: 'n18',
+                    value: 600
+                  },
+                  {
+                    name: 'n19',
+                    value: 300
+                  }
+                ],
+                name: 'graph9'
+              }
+            ]
+          },
+          schema: {
+            fields: ''
+          },
+          serviceload: {
+            graphData: [
+              {
+                dataCount: [
+                  {
+                    name: 'n4',
+                    value: 1
+                  },
+                  {
+                    name: 'n5',
+                    value: 2
+                  }
+                ],
+                name: 'graph2'
+              },
+              {
+                dataCount: [
+                  {
+                    name: 'n6',
+                    value: 10
+                  },
+                  {
+                    name: 'n7',
+                    value: 20
+                  }
+                ],
+                name: 'graph3'
+              }
+            ]
+          }
+        }
+      },
+      public: {
+        config: {}
+      },
+      service: {
+        logs: {
+          logList: [
+            {
+              path: '/var/log/ambari-metrics-collector/ambari-metrics-collector.log',
+              host: 'h0',
+              level: 'WARN',
+              logtime: moment().valueOf(),
+              ip: '192.168.0.1',
+              logfile_line_number: 8,
+              type: 'ams_collector',
+              _version_: 9,
+              id: 'id2',
+              file: 'ambari-metrics-collector.log',
+              seq_num: 10,
+              bundle_id: 'b2',
+              case_id: 'c2',
+              log_message: 'Connection refused.\nPlease check Ambari Metrics.\nCheck log file for details.',
+              message_md5: '1357908642',
+              cluster: 'cl2',
+              event_count: 5,
+              event_md5: '1908755391',
+              event_dur_ms: 200,
+              _ttl_: "+5DAYS",
+              _expire_at_: moment().add(5, 'd').valueOf(),
+              _router_field_: 20
+            },
+            {
+              path: '/var/log/ambari-metrics-collector/ambari-metrics-collector.log',
+              host: 'h1',
+              level: 'ERROR',
+              logtime: moment().subtract(2, 'd'),
+              ip: '192.168.0.2',
+              type: 'ams_collector',
+              _version_: 14,
+              id: 'id3',
+              file: 'ambari-metrics-collector.log',
+              seq_num: 15,
+              bundle_id: 'b3',
+              case_id: 'c3',
+              log_message: 'Connection refused.\nPlease check Ambari Metrics.\nCheck log file for details.',
+              logfile_line_number: 16,
+              message_md5: '1357908642',
+              cluster: 'cl3',
+              event_count: 2,
+              event_md5: '1029384756',
+              event_dur_ms: 700,
+              _ttl_: "+5DAYS",
+              _expire_at_: moment().add(3, 'd').valueOf(),
+              _router_field_: 5
+            },
+            {
+              path: '/var/log/ambari-metrics-collector/ambari-metrics-collector.log',
+              host: 'h1',
+              level: 'FATAL',
+              logtime: moment().subtract(10, 'd').valueOf(),
+              ip: '192.168.0.3',
+              type: 'ambari_agent',
+              _version_: 14,
+              id: 'id4',
+              file: 'ambari-agent.log',
+              seq_num: 15,
+              bundle_id: 'b4',
+              case_id: 'c4',
+              log_message: 'Connection refused.\nPlease check Ambari Agent.\nCheck log file for details.',
+              logfile_line_number: 16,
+              message_md5: '1038027502',
+              cluster: 'cl4',
+              event_count: 2,
+              event_md5: '67589403',
+              event_dur_ms: 100,
+              _ttl_: "+5DAYS",
+              _expire_at_: moment().subtract(5, 'd').valueOf(),
+              _router_field_: 45
+            },
+            {
+              path: '/var/log/ambari-metrics-collector/zookeeper-server.log',
+              host: 'h1',
+              level: 'INFO',
+              logtime: moment().subtract(25, 'h').valueOf(),
+              ip: '192.168.0.4',
+              type: 'zookeeper_server',
+              _version_: 14,
+              id: 'id4',
+              file: 'zookeeper_server.log',
+              seq_num: 15,
+              bundle_id: 'b0',
+              case_id: 'c0',
+              log_message: 'Connection refused.\nPlease check ZooKeeper Server.\nCheck log file for details.',
+              logfile_line_number: 16,
+              message_md5: '1038027502',
+              cluster: 'cl0',
+              event_count: 2,
+              event_md5: '67589403',
+              event_dur_ms: 1000,
+              _ttl_: "+5DAYS",
+              _expire_at_: moment().subtract(25, 'h').add(5, 'd').valueOf(),
+              _router_field_: 55
+            },
+            {
+              path: '/var/log/ambari-metrics-collector/zookeeper-server.log',
+              host: 'h1',
+              level: 'DEBUG',
+              logtime: moment().subtract(25, 'd').valueOf(),
+              ip: '192.168.0.4',
+              type: 'zookeeper_server',
+              _version_: 14,
+              id: 'id4',
+              file: 'zookeeper_server.log',
+              seq_num: 15,
+              bundle_id: 'b0',
+              case_id: 'c0',
+              log_message: 'Connection refused.\nPlease check ZooKeeper Server.\nCheck log file for details.',
+              logfile_line_number: 16,
+              message_md5: '1038027502',
+              cluster: 'cl1',
+              event_count: 2,
+              event_md5: '67589403',
+              event_dur_ms: 1000,
+              _ttl_: "+5DAYS",
+              _expire_at_: moment().subtract(20, 'd').valueOf(),
+              _router_field_: 55
+            },
+            {
+              path: '/var/log/ambari-metrics-collector/zookeeper-client.log',
+              host: 'h1',
+              level: 'TRACE',
+              logtime: moment().subtract(2, 'h').valueOf(),
+              ip: '192.168.0.4',
+              type: 'zookeeper_client',
+              _version_: 14,
+              id: 'id4',
+              file: 'zookeeper_client.log',
+              seq_num: 15,
+              bundle_id: 'b0',
+              case_id: 'c0',
+              log_message: 'Connection refused.\nPlease check ZooKeeper Client.\nCheck log file for details.',
+              logfile_line_number: 16,
+              message_md5: '1038027502',
+              cluster: 'cl1',
+              event_count: 2,
+              event_md5: '67589403',
+              event_dur_ms: 1000,
+              _ttl_: "+5DAYS",
+              _expire_at_: moment().subtract(2, 'h').add(5, 'd').valueOf(),
+              _router_field_: 55
+            },
+            {
+              path: '/var/log/ambari-metrics-collector/zookeeper-client.log',
+              host: 'h1',
+              level: 'UNKNOWN',
+              logtime: moment().subtract(31, 'd').valueOf(),
+              ip: '192.168.0.4',
+              type: 'zookeeper_client',
+              _version_: 14,
+              id: 'id4',
+              file: 'zookeeper_client.log',
+              seq_num: 15,
+              bundle_id: 'b0',
+              case_id: 'c0',
+              log_message: 'Connection refused.\nPlease check ZooKeeper Client.\nCheck log file for details.',
+              logfile_line_number: 16,
+              message_md5: '1038027502',
+              cluster: 'cl1',
+              event_count: 2,
+              event_md5: '67589403',
+              event_dur_ms: 1000,
+              _ttl_: "+5DAYS",
+              _expire_at_: moment().subtract(26, 'd').valueOf(),
+              _router_field_: 55
+            }
+          ],
+          aggregated: {
+            graphData: [
+              {
+                name: 'n0',
+                count: 100,
+                dataList: [
+                  {
+                    name: 'n1',
+                    count: 50,
+                    dataList: null
+                  },
+                  {
+                    name: 'n2',
+                    count: 200,
+                    dataList: null
+                  }
+                ]
+              },
+              {
+                name: 'n3',
+                count: 10,
+                dataList: [
+                  {
+                    name: 'n4',
+                    count: 5,
+                    dataList: null
+                  },
+                  {
+                    name: 'n5',
+                    count: 20,
+                    dataList: null
+                  }
+                ]
+              }
+            ]
+          },
+          components: {
+            count: {
+              anygraph: {
+                graphData: [
+                  {
+                    dataCount: [
+                      {
+                        name: 'n8',
+                        value: 50
+                      },
+                      {
+                        name: 'n9',
+                        value: 100
+                      }
+                    ],
+                    name: 'graph4'
+                  },
+                  {
+                    dataCount: [
+                      {
+                        name: 'n10',
+                        value: 5
+                      },
+                      {
+                        name: 'n11',
+                        value: 10
+                      }
+                    ],
+                    name: 'graph5'
+                  }
+                ]
+              }
+            },
+            levels: {
+              counts: {
+                vNodeList: [
+                  {
+                    name: 'ambari',
+                    type: 0,
+                    logLevelCount: [
+                      {
+                        name: 'ERROR',
+                        value: '10'
+                      },
+                      {
+                        name: 'WARN',
+                        value: '50'
+                      }
+                    ],
+                    childs: [
+                      {
+                        name: 'hdfs',
+                        type: 2,
+                        logLevelCount: [
+                          {
+                            name: 'ERROR',
+                            value: '10'
+                          },
+                          {
+                            name: 'WARN',
+                            value: '20'
+                          }
+                        ],
+                        isParent: false,
+                        isRoot: false
+                      },
+                      {
+                        name: 'zookeeper',
+                        type: 3,
+                        logLevelCount: [
+                          {
+                            name: 'ERROR',
+                            value: '20'
+                          },
+                          {
+                            name: 'WARN',
+                            value: '40'
+                          }
+                        ],
+                        isParent: false,
+                        isRoot: false
+                      }
+                    ],
+                    isParent: true,
+                    isRoot: false
+                  },
+                  {
+                    name: 'ambari_agent',
+                    type: 1,
+                    logLevelCount: [
+                      {
+                        name: 'ERROR',
+                        value: '100'
+                      },
+                      {
+                        name: 'WARN',
+                        value: '500'
+                      }
+                    ],
+                    isParent: false,
+                    isRoot: false
+                  }
+                ]
+              }
+            },
+            groupList: [
+              {
+                type: 'ams_collector'
+              },
+              {
+                type: 'ambari_agent'
+              },
+              {
+                type: 'zookeeper_server'
+              },
+              {
+                type: 'zookeeper_client'
+              }
+            ]
+          },
+          files: {
+            hostLogFiles: {
+              clusters: [
+                'c0',
+                'c1'
+              ],
+              services: [
+                'hdfs',
+                'zookeeper'
+              ]
+            }
+          },
+          histogram: {
+            graphData: [
+              {
+                dataCount: [
+                  {
+                    name: 'n12',
+                    value: 1000
+                  },
+                  {
+                    name: 'n13',
+                    value: 2000
+                  }
+                ],
+                name: 'graph6'
+              },
+              {
+                dataCount: [
+                  {
+                    name: 'n14',
+                    value: 700
+                  },
+                  {
+                    name: 'n15',
+                    value: 900
+                  }
+                ],
+                name: 'graph7'
+              }
+            ]
+          },
+          hosts: {
+            components: {
+              vNodeList: [
+                {
+                  name: 'ambari',
+                  type: 0,
+                  logLevelCount: [
+                    {
+                      name: 'ERROR',
+                      value: '100'
+                    },
+                    {
+                      name: 'WARN',
+                      value: '500'
+                    }
+                  ],
+                  childs: [
+                    {
+                      name: 'ambari_metrics',
+                      type: 2,
+                      logLevelCount: [
+                        {
+                          name: 'ERROR',
+                          value: '100'
+                        },
+                        {
+                          name: 'WARN',
+                          value: '200'
+                        }
+                      ],
+                      isParent: false,
+                      isRoot: false
+                    },
+                    {
+                      name: 'hbase',
+                      type: 3,
+                      logLevelCount: [
+                        {
+                          name: 'ERROR',
+                          value: '200'
+                        },
+                        {
+                          name: 'WARN',
+                          value: '400'
+                        }
+                      ],
+                      isParent: false,
+                      isRoot: false
+                    }
+                  ],
+                  isParent: true,
+                  isRoot: false
+                },
+                {
+                  name: 'ambari_server',
+                  type: 1,
+                  logLevelCount: [
+                    {
+                      name: 'ERROR',
+                      value: '1000'
+                    },
+                    {
+                      name: 'WARN',
+                      value: '5000'
+                    }
+                  ],
+                  isParent: false,
+                  isRoot: false
+                }
+              ]
+            },
+            count: {
+              getvCounts: [
+                {
+                  name: 'n20',
+                  count: 100
+                },
+                {
+                  name: 'n21',
+                  count: 200
+                }
+              ]
+            }
+          },
+          levels: {
+            counts: {
+              getvNameValues: [
+                {
+                  name: 'n22',
+                  count: 1000
+                },
+                {
+                  name: 'n23',
+                  count: 2000
+                }
+              ]
+            }
+          },
+          schema: {
+            fields: ''
+          },
+          serviceconfig: '',
+          tree: {
+            vNodeList: [
+              {
+                name: 'ambari',
+                type: 0,
+                logLevelCount: [
+                  {
+                    name: 'ERROR',
+                    value: '1000'
+                  },
+                  {
+                    name: 'WARN',
+                    value: '5000'
+                  }
+                ],
+                childs: [
+                  {
+                    name: 'yarn',
+                    type: 2,
+                    logLevelCount: [
+                      {
+                        name: 'ERROR',
+                        value: '1000'
+                      },
+                      {
+                        name: 'WARN',
+                        value: '2000'
+                      }
+                    ],
+                    isParent: false,
+                    isRoot: false
+                  },
+                  {
+                    name: 'hive',
+                    type: 3,
+                    logLevelCount: [
+                      {
+                        name: 'ERROR',
+                        value: '2000'
+                      },
+                      {
+                        name: 'WARN',
+                        value: '4000'
+                      }
+                    ],
+                    isParent: false,
+                    isRoot: false
+                  }
+                ],
+                isParent: true,
+                isRoot: false
+              },
+              {
+                name: 'ambari_server',
+                type: 1,
+                logLevelCount: [
+                  {
+                    name: 'ERROR',
+                    value: '10000'
+                  },
+                  {
+                    name: 'WARN',
+                    value: '50000'
+                  }
+                ],
+                isParent: false,
+                isRoot: false
+              }
+            ]
+          },
+          truncated: {
+            logList: [
+              {
+                path: '/var/log/ambari-metrics-collector/ambari-metrics-collector.log',
+                host: 'h0',
+                level: 'WARN',
+                logtime: '2017-05-28T11:30:22.531Z',
+                ip: '192.168.0.1',
+                logfile_line_number: 8,
+                type: 'ams_collector',
+                _version_: 9,
+                id: 'id2',
+                file: 'ambari-metrics-collector.log',
+                seq_num: 10,
+                bundle_id: 'b2',
+                case_id: 'c2',
+                log_message: 'Connection refused',
+                message_md5: '1357908642',
+                cluster: 'cl2',
+                event_count: 5,
+                event_md5: '1908755391',
+                event_dur_ms: 200,
+                _ttl_: "+5DAYS",
+                _expire_at_: '2017-05-29T11:30:22.531Z',
+                _router_field_: 20
+              },
+              {
+                path: '/var/log/ambari-metrics-collector/ambari-metrics-collector.log',
+                host: 'h1',
+                level: 'ERROR',
+                logtime: '2017-05-28T10:30:22.531Z',
+                ip: '192.168.0.2',
+                type: 'ams_collector',
+                _version_: 14,
+                id: 'id3',
+                file: 'ambari-metrics-collector.log',
+                seq_num: 15,
+                bundle_id: 'b3',
+                case_id: 'c3',
+                log_message: 'Connection refused',
+                logfile_line_number: 16,
+                message_md5: '1357908642',
+                cluster: 'cl3',
+                event_count: 2,
+                event_md5: '1029384756',
+                event_dur_ms: 700,
+                _ttl_: "+5DAYS",
+                _expire_at_: '2017-05-29T10:30:22.531Z',
+                _router_field_: 5
+              }
+            ]
+          },
+          clusters: [
+            'cl0',
+            'cl1',
+            'cl2',
+            'cl3',
+            'cl4'
+          ]
+        }
+      },
+      status: {
+        auditlogs: {
+          znodeReady: true,
+          solrCollectionReady: true,
+          solrAliasReady: false,
+          configurationUploaded: true
+        },
+        servicelogs: {
+          znodeReady: true,
+          solrCollectionReady: true,
+          configurationUploaded: true
+        },
+        userconfig: {
+          znodeReady: true,
+          solrCollectionReady: true,
+          configurationUploaded: true
+        }
+      },
+      userconfig: {
+        userConfigList: [
+          {
+            id: 'c0',
+            userName: 'admin',
+            filtername: 'service',
+            values: 'hdfs',
+            shareNameList: [
+              's0',
+              's1'
+            ],
+            rowType: 'history'
+          },
+          {
+            id: 'c0',
+            userName: 'user',
+            filtername: 'component',
+            values: 'namenode',
+            shareNameList: [
+              's2',
+              's3'
+            ],
+            rowType: 'history'
+          }
+        ],
+        filters: {
+          filter0: {
+            label: 'filter0',
+            hosts: [
+              'h0',
+              'h1'
+            ],
+            defaultLevels: [
+              'l0',
+              'l1'
+            ],
+            overrideLevels: [
+              'l2',
+              'l3'
+            ],
+            expiryTime: '2017-05-29T11:30:22.531Z'
+          },
+          filter1: {
+            label: 'filter1',
+            hosts: [
+              'h1',
+              'h2'
+            ],
+            defaultLevels: [
+              'l4',
+              'l5'
+            ],
+            overrideLevels: [
+              'l6',
+              'l7'
+            ],
+            expiryTime: '2017-05-30T11:30:22.531Z'
+          }
+        },
+        names: []
+      }
+    }
+  }
+};
\ No newline at end of file
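
The fixtures added above build logtime and _expire_at_ values with moment offsets from the current time (moment().subtract(...) / moment().add(...)) rather than fixed ISO strings, which keeps the demo entries inside typical "last N hours/days" windows whenever the mock backend is queried. A minimal sketch of that pattern, assuming only the moment dependency; relativeLogtime and sampleEntry are illustrative names, not part of the module:

import * as moment from 'moment';

// Illustrative helper: every value is an epoch-millisecond number offset from
// "now", so relative time filters keep matching the fixtures on any day.
function relativeLogtime(amount: number, unit: moment.unitOfTime.DurationConstructor): number {
  return moment().subtract(amount, unit).valueOf();
}

const sampleEntry = {
  level: 'ERROR',
  logtime: relativeLogtime(2, 'd'),             // two days ago, in ms
  _expire_at_: moment().add(5, 'd').valueOf()   // five days from now, in ms
};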

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.ts
index e31202b..c317076 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.ts
@@ -42,10 +42,7 @@ export class FilteringService {
 
   private readonly defaultTimeZone = moment.tz.guess();
 
-  private readonly sortMap = {
-    component_name: 'type',
-    start_time: 'logtime'
-  };
+  private readonly paginationOptions = ['10', '25', '50', '100'];
 
   timeZone: string = this.defaultTimeZone;
 
@@ -58,10 +55,8 @@ export class FilteringService {
           value: ''
         }
       ],
-      selectedValue: '',
       defaultValue: '',
-      defaultLabel: 'filter.all',
-      paramName: 'clusters',
+      defaultLabel: 'filter.all'
     },
     text: {
       label: 'filter.message',
@@ -136,11 +131,6 @@ export class FilteringService {
           }
         }
       ],
-      selectedValue: {
-        type: 'LAST',
-        unit: 'h',
-        interval: 1
-      },
       defaultValue: {
         type: 'LAST',
         unit: 'h',
@@ -157,9 +147,7 @@ export class FilteringService {
           value: ''
         }
       ],
-      selectedValue: '',
-      defaultValue: '',
-      defaultLabel: 'filter.all'
+      defaultValue: ''
     },
     levels: {
       label: 'filter.levels',
@@ -198,9 +186,7 @@ export class FilteringService {
           value: 'UNKNOWN'
         }
       ],
-      selectedValue: '',
-      defaultValue: '',
-      defaultLabel: 'filter.all'
+      defaultValue: ''
     },
     sorting: {
       label: 'sorting.title',
@@ -222,35 +208,48 @@ export class FilteringService {
         {
           label: 'sorting.component.asc',
           value: {
-            key: 'component_name',
+            key: 'type',
             type: 'asc'
           }
         },
         {
           label: 'sorting.component.desc',
           value: {
-            key: 'component_name',
+            key: 'type',
             type: 'desc'
           }
         },
         {
           label: 'sorting.time.asc',
           value: {
-            key: 'start_time',
+            key: 'logtime',
             type: 'asc'
           }
         },
         {
           label: 'sorting.time.desc',
           value: {
-            key: 'start_time',
+            key: 'logtime',
             type: 'desc'
           }
         }
       ],
-      selectedValue: '',
       defaultValue: '',
       defaultLabel: ''
+    },
+    pageSize: {
+      label: 'pagination.title',
+      options: this.paginationOptions.map(option => {
+        return {
+          label: option,
+          value: option
+        }
+      }),
+      defaultValue: '10',
+      defaultLabel: '10'
+    },
+    page: {
+      defaultValue: 0
     }
   };
 
@@ -269,7 +268,7 @@ export class FilteringService {
   private filtersFormItems = Object.keys(this.filters).reduce((currentObject, key) => {
     let formControl = new FormControl(),
       item = {
-        [key]: new FormControl()
+        [key]: formControl
       };
     formControl.setValue(this.filters[key].defaultValue);
     return Object.assign(currentObject, item);
@@ -318,7 +317,8 @@ export class FilteringService {
       return time ? time.toISOString() : '';
     },
     sortType: value => value && value.type,
-    sortBy: value => value && (this.sortMap[value.key] || value.key)
+    sortBy: value => value && value.key,
+    page: value => value == null ? value : value.toString()
   };
 
 }
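
The hunk above stores the formControl already declared in the reduce callback instead of constructing a second new FormControl(), so the control placed under [key] is the same one that receives the default value; it also drops the sortMap indirection by using the Solr field names (type, logtime) directly as sorting keys and introduces pageSize/page filters. A minimal, self-contained sketch of the control-building pattern, assuming only @angular/forms; the filters map here is a trimmed-down stand-in, not the service's real definition:

import { FormControl, FormGroup } from '@angular/forms';

// Trimmed-down stand-in for the service's filter definitions.
const filters: { [key: string]: { defaultValue: any } } = {
  clusters: { defaultValue: '' },
  pageSize: { defaultValue: '10' },
  page: { defaultValue: 0 }
};

// Build one FormControl per filter, seeded with its default value.
// Reusing the same `formControl` instance is the point of the fix above:
// the control stored in the map is the one that receives setValue().
const filtersFormItems = Object.keys(filters).reduce(
  (acc, key) => {
    const formControl = new FormControl();
    formControl.setValue(filters[key].defaultValue);
    return Object.assign(acc, { [key]: formControl });
  },
  {} as { [key: string]: FormControl }
);

const filtersForm = new FormGroup(filtersFormItems);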

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/app/services/mock-api-data.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/mock-api-data.service.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/mock-api-data.service.ts
index fdebb2f..985a0bf 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/mock-api-data.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/mock-api-data.service.ts
@@ -21,7 +21,7 @@ import {InMemoryDbService, InMemoryBackendService, createErrorResponse} from 'an
 import {Observable} from 'rxjs/Observable';
 import {Subscriber} from 'rxjs/Subscriber';
 import * as moment from 'moment';
-import {mockData} from '../../assets/mock-data';
+import {mockData} from '@app/mock-data';
 
 export class mockBackendService extends InMemoryBackendService {
   getLocation(url: string): any {
@@ -34,6 +34,7 @@ export class mockApiDataService implements InMemoryDbService {
   private readonly filterMap = {
     'api/v1/service/logs': {
       pathToCollection: 'logList',
+      totalCountKey: 'totalCount',
       filters: {
         clusters: {
           key: 'cluster'
@@ -105,8 +106,8 @@ export class mockApiDataService implements InMemoryDbService {
         if (query && filterMapItem) {
           filteredData = {};
           const pathToCollection = filterMapItem.pathToCollection,
-            collection = allData[pathToCollection],
-            filteredCollection = collection.filter(item => {
+            collection = allData[pathToCollection];
+          let filteredCollection = collection.filter(item => {
             let result = true;
               query.paramsMap.forEach((value, key) => {
               const paramValue = decodeURIComponent(value[0]), // TODO implement multiple conditions
@@ -136,6 +137,14 @@ export class mockApiDataService implements InMemoryDbService {
               return ascResult * Math.pow(-1, Number(sortType === 'desc'));
             });
           }
+          if (filterMapItem.totalCountKey) {
+            filteredData[filterMapItem.totalCountKey] = filteredCollection.length;
+          }
+          if (query && query.paramsMap.has('page') && query.paramsMap.has('pageSize')) {
+            const page = parseInt(query.paramsMap.get('page')[0]),
+              pageSize = parseInt(query.paramsMap.get('pageSize')[0]);
+            filteredCollection = filteredCollection.slice(page * pageSize, (page + 1) * pageSize);
+          }
           filteredData[pathToCollection] = filteredCollection;
         } else {
           filteredData = allData;
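
The change above makes the in-memory backend report the total size of the filtered collection under totalCountKey before applying page/pageSize slicing to the already filtered and sorted list. A small sketch of the same slice arithmetic in isolation; the paginate helper and its names are illustrative, not part of the service:

// Illustrative helper mirroring the slice arithmetic added above:
// `page` is zero-based, so page 0 with pageSize 10 yields items 0..9.
function paginate<T>(collection: T[], page: number, pageSize: number): { totalCount: number; items: T[] } {
  return {
    totalCount: collection.length,  // size of the full filtered set, before slicing
    items: collection.slice(page * pageSize, (page + 1) * pageSize)
  };
}

// e.g. paginate(['a', 'b', 'c', 'd', 'e'], 1, 2) -> { totalCount: 5, items: ['c', 'd'] }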

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/assets/i18n/en.json
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/assets/i18n/en.json b/ambari-logsearch/ambari-logsearch-web-new/src/assets/i18n/en.json
index 33d059e..d6a46ac 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/assets/i18n/en.json
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/assets/i18n/en.json
@@ -46,6 +46,9 @@
   "sorting.time.asc": "Ascending Time",
   "sorting.time.desc": "Descending Time",
 
+  "pagination.title": "Rows per page:",
+  "pagination.numbers": "{{startIndex}}-{{endIndex}} of {{totalCount}}",
+
   "logs.status": "Status",
   "logs.details": "Details"
 }
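
The two new keys back the page-size dropdown and an "X-Y of Z" counter. Assuming the web app consumes these files through ngx-translate (the usual reason for an assets/i18n/en.json layout, though this diff does not confirm it), the placeholder key could be interpolated roughly as below; the class and variable names are made up for illustration:

// Hypothetical usage; assumes @ngx-translate/core, which substitutes
// {{startIndex}} / {{endIndex}} / {{totalCount}} from the params object.
import { TranslateService } from '@ngx-translate/core';

export class PaginationLabelExample {
  constructor(private translate: TranslateService) {}

  label(page: number, pageSize: number, totalCount: number): string {
    const startIndex = page * pageSize + 1;
    const endIndex = Math.min((page + 1) * pageSize, totalCount);
    return this.translate.instant('pagination.numbers', { startIndex, endIndex, totalCount });
  }
}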

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/assets/mock-data.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/assets/mock-data.ts b/ambari-logsearch/ambari-logsearch-web-new/src/assets/mock-data.ts
deleted file mode 100644
index 3d04122..0000000
--- a/ambari-logsearch/ambari-logsearch-web-new/src/assets/mock-data.ts
+++ /dev/null
@@ -1,906 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import * as moment from 'moment-timezone';
-
-export const mockData = {
-  login: {},
-  api: {
-    v1: {
-      audit: {
-        logs: {
-          logList: [
-            {
-              policy: 'policy',
-              reason: 'Authentication required',
-              result: 0,
-              text: 'Please log in',
-              tags: [
-                'ambari_agent'
-              ],
-              resource: '/ambari-agent',
-              sess: '0',
-              access: '0',
-              logType: 'AmbariAudit',
-              tags_str: 'ambari_agent',
-              resType: 'agent',
-              reqUser: 'admin',
-              reqData: 'data',
-              repoType: 1,
-              repo: 'ambari',
-              proxyUsers: [
-                'admin'
-              ],
-              evtTime: '2017-05-29T11:30:22.531Z',
-              enforcer: 'ambari-acl',
-              reqContext: 'ambari',
-              cliType: 'GET',
-              cliIP: '192.168.0.1',
-              agent: 'agent',
-              agentHost: 'localhost',
-              action: 'SERVICE_CHECK',
-              type: 'ambari-audit',
-              _version_: 2,
-              id: 'id0',
-              file: 'ambari-agent.log',
-              seq_num: 3,
-              bundle_id: 'b0',
-              case_id: 'c0',
-              log_message: 'User(admin), Operation(SERVICE_CHECK)',
-              logfile_line_number: 4,
-              message_md5: '12345678900987654321',
-              cluster: 'cl0',
-              event_count: 0,
-              event_md5: '09876543211234567890',
-              event_dur_ms: 100,
-              _ttl_: "+7DAYS",
-              _expire_at_: '2017-05-29T11:30:22.531Z',
-              _router_field_: 5
-            },
-            {
-              policy: 'policy',
-              reason: 'Server error',
-              result: 1,
-              text: 'Something went wrong',
-              tags: [
-                'ambari_agent'
-              ],
-              resource: '/ambari-agent',
-              sess: '1',
-              access: '1',
-              logType: 'AmbariAudit',
-              tags_str: 'ambari_server',
-              resType: 'server',
-              reqUser: 'user',
-              reqData: 'data',
-              repoType: 1,
-              repo: 'ambari',
-              proxyUsers: [
-                'user'
-              ],
-              evtTime: '2017-05-29T11:30:22.531Z',
-              enforcer: 'hdfs',
-              reqContext: 'ambari_server',
-              cliType: 'PUT',
-              cliIP: '192.168.0.1',
-              agent: 'agent',
-              agentHost: 'localhost',
-              action: 'SERVICE_CHECK',
-              type: 'ambari-audit',
-              _version_: 4,
-              id: 'id1',
-              file: 'ambari-agent.log',
-              seq_num: 5,
-              bundle_id: 'b1',
-              case_id: 'c1',
-              log_message: 'User(user), Operation(SERVICE_CHECK)',
-              logfile_line_number: 6,
-              message_md5: '10293847561029384756',
-              cluster: 'cl1',
-              event_count: 2,
-              event_md5: '01928374650192837465',
-              event_dur_ms: 500,
-              _ttl_: "+7DAYS",
-              _expire_at_: '2017-05-29T11:30:22.531Z',
-              _router_field_: 10
-            }
-          ],
-          bargraph: {
-            graphData: [
-              {
-                dataCount: [
-                  {
-                    name: 'n0',
-                    value: 1
-                  },
-                  {
-                    name: 'n1',
-                    value: 2
-                  }
-                ],
-                name: 'graph0'
-              },
-              {
-                dataCount: [
-                  {
-                    name: 'n2',
-                    value: 10
-                  },
-                  {
-                    name: 'n3',
-                    value: 20
-                  }
-                ],
-                name: 'graph1'
-              }
-            ]
-          },
-          components: {},
-          resources: {
-            graphData: [
-              {
-                dataCount: [
-                  {
-                    name: 'n16',
-                    value: 800
-                  },
-                  {
-                    name: 'n17',
-                    value: 400
-                  }
-                ],
-                name: 'graph8'
-              },
-              {
-                dataCount: [
-                  {
-                    name: 'n18',
-                    value: 600
-                  },
-                  {
-                    name: 'n19',
-                    value: 300
-                  }
-                ],
-                name: 'graph9'
-              }
-            ]
-          },
-          schema: {
-            fields: ''
-          },
-          serviceload: {
-            graphData: [
-              {
-                dataCount: [
-                  {
-                    name: 'n4',
-                    value: 1
-                  },
-                  {
-                    name: 'n5',
-                    value: 2
-                  }
-                ],
-                name: 'graph2'
-              },
-              {
-                dataCount: [
-                  {
-                    name: 'n6',
-                    value: 10
-                  },
-                  {
-                    name: 'n7',
-                    value: 20
-                  }
-                ],
-                name: 'graph3'
-              }
-            ]
-          }
-        }
-      },
-      public: {
-        config: {}
-      },
-      service: {
-        logs: {
-          logList: [
-            {
-              path: '/var/log/ambari-metrics-collector/ambari-metrics-collector.log',
-              host: 'h0',
-              level: 'WARN',
-              logtime: moment().valueOf(),
-              ip: '192.168.0.1',
-              logfile_line_number: 8,
-              type: 'ams_collector',
-              _version_: 9,
-              id: 'id2',
-              file: 'ambari-metrics-collector.log',
-              seq_num: 10,
-              bundle_id: 'b2',
-              case_id: 'c2',
-              log_message: 'Connection refused.\nPlease check Ambari Metrics.\nCheck log file for details.',
-              message_md5: '1357908642',
-              cluster: 'cl2',
-              event_count: 5,
-              event_md5: '1908755391',
-              event_dur_ms: 200,
-              _ttl_: "+5DAYS",
-              _expire_at_: moment().add(5, 'd').valueOf(),
-              _router_field_: 20
-            },
-            {
-              path: '/var/log/ambari-metrics-collector/ambari-metrics-collector.log',
-              host: 'h1',
-              level: 'ERROR',
-              logtime: moment().subtract(2, 'd'),
-              ip: '192.168.0.2',
-              type: 'ams_collector',
-              _version_: 14,
-              id: 'id3',
-              file: 'ambari-metrics-collector.log',
-              seq_num: 15,
-              bundle_id: 'b3',
-              case_id: 'c3',
-              log_message: 'Connection refused.\nPlease check Ambari Metrics.\nCheck log file for details.',
-              logfile_line_number: 16,
-              message_md5: '1357908642',
-              cluster: 'cl3',
-              event_count: 2,
-              event_md5: '1029384756',
-              event_dur_ms: 700,
-              _ttl_: "+5DAYS",
-              _expire_at_: moment().add(3, 'd').valueOf(),
-              _router_field_: 5
-            },
-            {
-              path: '/var/log/ambari-metrics-collector/ambari-metrics-collector.log',
-              host: 'h1',
-              level: 'FATAL',
-              logtime: moment().subtract(10, 'd').valueOf(),
-              ip: '192.168.0.3',
-              type: 'ambari_agent',
-              _version_: 14,
-              id: 'id4',
-              file: 'ambari-agent.log',
-              seq_num: 15,
-              bundle_id: 'b4',
-              case_id: 'c4',
-              log_message: 'Connection refused.\nPlease check Ambari Agent.\nCheck log file for details.',
-              logfile_line_number: 16,
-              message_md5: '1038027502',
-              cluster: 'cl4',
-              event_count: 2,
-              event_md5: '67589403',
-              event_dur_ms: 100,
-              _ttl_: "+5DAYS",
-              _expire_at_: moment().subtract(5, 'd').valueOf(),
-              _router_field_: 45
-            },
-            {
-              path: '/var/log/ambari-metrics-collector/zookeeper-server.log',
-              host: 'h1',
-              level: 'INFO',
-              logtime: moment().subtract(25, 'h').valueOf(),
-              ip: '192.168.0.4',
-              type: 'zookeeper_server',
-              _version_: 14,
-              id: 'id4',
-              file: 'zookeeper_server.log',
-              seq_num: 15,
-              bundle_id: 'b0',
-              case_id: 'c0',
-              log_message: 'Connection refused.\nPlease check ZooKeeper Server.\nCheck log file for details.',
-              logfile_line_number: 16,
-              message_md5: '1038027502',
-              cluster: 'cl0',
-              event_count: 2,
-              event_md5: '67589403',
-              event_dur_ms: 1000,
-              _ttl_: "+5DAYS",
-              _expire_at_: moment().subtract(25, 'h').add(5, 'd').valueOf(),
-              _router_field_: 55
-            },
-            {
-              path: '/var/log/ambari-metrics-collector/zookeeper-server.log',
-              host: 'h1',
-              level: 'DEBUG',
-              logtime: moment().subtract(25, 'd').valueOf(),
-              ip: '192.168.0.4',
-              type: 'zookeeper_server',
-              _version_: 14,
-              id: 'id4',
-              file: 'zookeeper_server.log',
-              seq_num: 15,
-              bundle_id: 'b0',
-              case_id: 'c0',
-              log_message: 'Connection refused.\nPlease check ZooKeeper Server.\nCheck log file for details.',
-              logfile_line_number: 16,
-              message_md5: '1038027502',
-              cluster: 'cl1',
-              event_count: 2,
-              event_md5: '67589403',
-              event_dur_ms: 1000,
-              _ttl_: "+5DAYS",
-              _expire_at_: moment().subtract(20, 'd').valueOf(),
-              _router_field_: 55
-            },
-            {
-              path: '/var/log/ambari-metrics-collector/zookeeper-client.log',
-              host: 'h1',
-              level: 'TRACE',
-              logtime: moment().subtract(2, 'h').valueOf(),
-              ip: '192.168.0.4',
-              type: 'zookeeper_client',
-              _version_: 14,
-              id: 'id4',
-              file: 'zookeeper_client.log',
-              seq_num: 15,
-              bundle_id: 'b0',
-              case_id: 'c0',
-              log_message: 'Connection refused.\nPlease check ZooKeeper Client.\nCheck log file for details.',
-              logfile_line_number: 16,
-              message_md5: '1038027502',
-              cluster: 'cl1',
-              event_count: 2,
-              event_md5: '67589403',
-              event_dur_ms: 1000,
-              _ttl_: "+5DAYS",
-              _expire_at_: moment().subtract(2, 'h').add(5, 'd').valueOf(),
-              _router_field_: 55
-            },
-            {
-              path: '/var/log/ambari-metrics-collector/zookeeper-client.log',
-              host: 'h1',
-              level: 'UNKNOWN',
-              logtime: moment().subtract(31, 'd').valueOf(),
-              ip: '192.168.0.4',
-              type: 'zookeeper_client',
-              _version_: 14,
-              id: 'id4',
-              file: 'zookeeper_client.log',
-              seq_num: 15,
-              bundle_id: 'b0',
-              case_id: 'c0',
-              log_message: 'Connection refused.\nPlease check ZooKeeper Client.\nCheck log file for details.',
-              logfile_line_number: 16,
-              message_md5: '1038027502',
-              cluster: 'cl1',
-              event_count: 2,
-              event_md5: '67589403',
-              event_dur_ms: 1000,
-              _ttl_: "+5DAYS",
-              _expire_at_: moment().subtract(26, 'd').valueOf(),
-              _router_field_: 55
-            }
-          ],
-          aggregated: {
-            graphData: [
-              {
-                name: 'n0',
-                count: 100,
-                dataList: [
-                  {
-                    name: 'n1',
-                    count: 50,
-                    dataList: null
-                  },
-                  {
-                    name: 'n2',
-                    count: 200,
-                    dataList: null
-                  }
-                ]
-              },
-              {
-                name: 'n3',
-                count: 10,
-                dataList: [
-                  {
-                    name: 'n4',
-                    count: 5,
-                    dataList: null
-                  },
-                  {
-                    name: 'n5',
-                    count: 20,
-                    dataList: null
-                  }
-                ]
-              }
-            ]
-          },
-          components: {
-            count: {
-              anygraph: {
-                graphData: [
-                  {
-                    dataCount: [
-                      {
-                        name: 'n8',
-                        value: 50
-                      },
-                      {
-                        name: 'n9',
-                        value: 100
-                      }
-                    ],
-                    name: 'graph4'
-                  },
-                  {
-                    dataCount: [
-                      {
-                        name: 'n10',
-                        value: 5
-                      },
-                      {
-                        name: 'n11',
-                        value: 10
-                      }
-                    ],
-                    name: 'graph5'
-                  }
-                ]
-              }
-            },
-            levels: {
-              counts: {
-                vNodeList: [
-                  {
-                    name: 'ambari',
-                    type: 0,
-                    logLevelCount: [
-                      {
-                        name: 'ERROR',
-                        value: '10'
-                      },
-                      {
-                        name: 'WARN',
-                        value: '50'
-                      }
-                    ],
-                    childs: [
-                      {
-                        name: 'hdfs',
-                        type: 2,
-                        logLevelCount: [
-                          {
-                            name: 'ERROR',
-                            value: '10'
-                          },
-                          {
-                            name: 'WARN',
-                            value: '20'
-                          }
-                        ],
-                        isParent: false,
-                        isRoot: false
-                      },
-                      {
-                        name: 'zookeeper',
-                        type: 3,
-                        logLevelCount: [
-                          {
-                            name: 'ERROR',
-                            value: '20'
-                          },
-                          {
-                            name: 'WARN',
-                            value: '40'
-                          }
-                        ],
-                        isParent: false,
-                        isRoot: false
-                      }
-                    ],
-                    isParent: true,
-                    isRoot: false
-                  },
-                  {
-                    name: 'ambari_agent',
-                    type: 1,
-                    logLevelCount: [
-                      {
-                        name: 'ERROR',
-                        value: '100'
-                      },
-                      {
-                        name: 'WARN',
-                        value: '500'
-                      }
-                    ],
-                    isParent: false,
-                    isRoot: false
-                  }
-                ]
-              }
-            },
-            groupList: [
-              {
-                type: 'ams_collector'
-              },
-              {
-                type: 'ambari_agent'
-              },
-              {
-                type: 'zookeeper_server'
-              },
-              {
-                type: 'zookeeper_client'
-              }
-            ]
-          },
-          files: {
-            hostLogFiles: {
-              clusters: [
-                'c0',
-                'c1'
-              ],
-              services: [
-                'hdfs',
-                'zookeeper'
-              ]
-            }
-          },
-          histogram: {
-            graphData: [
-              {
-                dataCount: [
-                  {
-                    name: 'n12',
-                    value: 1000
-                  },
-                  {
-                    name: 'n13',
-                    value: 2000
-                  }
-                ],
-                name: 'graph6'
-              },
-              {
-                dataCount: [
-                  {
-                    name: 'n14',
-                    value: 700
-                  },
-                  {
-                    name: 'n15',
-                    value: 900
-                  }
-                ],
-                name: 'graph7'
-              }
-            ]
-          },
-          hosts: {
-            components: {
-              vNodeList: [
-                {
-                  name: 'ambari',
-                  type: 0,
-                  logLevelCount: [
-                    {
-                      name: 'ERROR',
-                      value: '100'
-                    },
-                    {
-                      name: 'WARN',
-                      value: '500'
-                    }
-                  ],
-                  childs: [
-                    {
-                      name: 'ambari_metrics',
-                      type: 2,
-                      logLevelCount: [
-                        {
-                          name: 'ERROR',
-                          value: '100'
-                        },
-                        {
-                          name: 'WARN',
-                          value: '200'
-                        }
-                      ],
-                      isParent: false,
-                      isRoot: false
-                    },
-                    {
-                      name: 'hbase',
-                      type: 3,
-                      logLevelCount: [
-                        {
-                          name: 'ERROR',
-                          value: '200'
-                        },
-                        {
-                          name: 'WARN',
-                          value: '400'
-                        }
-                      ],
-                      isParent: false,
-                      isRoot: false
-                    }
-                  ],
-                  isParent: true,
-                  isRoot: false
-                },
-                {
-                  name: 'ambari_server',
-                  type: 1,
-                  logLevelCount: [
-                    {
-                      name: 'ERROR',
-                      value: '1000'
-                    },
-                    {
-                      name: 'WARN',
-                      value: '5000'
-                    }
-                  ],
-                  isParent: false,
-                  isRoot: false
-                }
-              ]
-            },
-            count: {
-              getvCounts: [
-                {
-                  name: 'n20',
-                  count: 100
-                },
-                {
-                  name: 'n21',
-                  count: 200
-                }
-              ]
-            }
-          },
-          levels: {
-            counts: {
-              getvNameValues: [
-                {
-                  name: 'n22',
-                  count: 1000
-                },
-                {
-                  name: 'n23',
-                  count: 2000
-                }
-              ]
-            }
-          },
-          schema: {
-            fields: ''
-          },
-          serviceconfig: '',
-          tree: {
-            vNodeList: [
-              {
-                name: 'ambari',
-                type: 0,
-                logLevelCount: [
-                  {
-                    name: 'ERROR',
-                    value: '1000'
-                  },
-                  {
-                    name: 'WARN',
-                    value: '5000'
-                  }
-                ],
-                childs: [
-                  {
-                    name: 'yarn',
-                    type: 2,
-                    logLevelCount: [
-                      {
-                        name: 'ERROR',
-                        value: '1000'
-                      },
-                      {
-                        name: 'WARN',
-                        value: '2000'
-                      }
-                    ],
-                    isParent: false,
-                    isRoot: false
-                  },
-                  {
-                    name: 'hive',
-                    type: 3,
-                    logLevelCount: [
-                      {
-                        name: 'ERROR',
-                        value: '2000'
-                      },
-                      {
-                        name: 'WARN',
-                        value: '4000'
-                      }
-                    ],
-                    isParent: false,
-                    isRoot: false
-                  }
-                ],
-                isParent: true,
-                isRoot: false
-              },
-              {
-                name: 'ambari_server',
-                type: 1,
-                logLevelCount: [
-                  {
-                    name: 'ERROR',
-                    value: '10000'
-                  },
-                  {
-                    name: 'WARN',
-                    value: '50000'
-                  }
-                ],
-                isParent: false,
-                isRoot: false
-              }
-            ]
-          },
-          truncated: {
-            logList: [
-              {
-                path: '/var/log/ambari-metrics-collector/ambari-metrics-collector.log',
-                host: 'h0',
-                level: 'WARN',
-                logtime: '2017-05-28T11:30:22.531Z',
-                ip: '192.168.0.1',
-                logfile_line_number: 8,
-                type: 'ams_collector',
-                _version_: 9,
-                id: 'id2',
-                file: 'ambari-metrics-collector.log',
-                seq_num: 10,
-                bundle_id: 'b2',
-                case_id: 'c2',
-                log_message: 'Connection refused',
-                message_md5: '1357908642',
-                cluster: 'cl2',
-                event_count: 5,
-                event_md5: '1908755391',
-                event_dur_ms: 200,
-                _ttl_: "+5DAYS",
-                _expire_at_: '2017-05-29T11:30:22.531Z',
-                _router_field_: 20
-              },
-              {
-                path: '/var/log/ambari-metrics-collector/ambari-metrics-collector.log',
-                host: 'h1',
-                level: 'ERROR',
-                logtime: '2017-05-28T10:30:22.531Z',
-                ip: '192.168.0.2',
-                type: 'ams_collector',
-                _version_: 14,
-                id: 'id3',
-                file: 'ambari-metrics-collector.log',
-                seq_num: 15,
-                bundle_id: 'b3',
-                case_id: 'c3',
-                log_message: 'Connection refused',
-                logfile_line_number: 16,
-                message_md5: '1357908642',
-                cluster: 'cl3',
-                event_count: 2,
-                event_md5: '1029384756',
-                event_dur_ms: 700,
-                _ttl_: "+5DAYS",
-                _expire_at_: '2017-05-29T10:30:22.531Z',
-                _router_field_: 5
-              }
-            ]
-          },
-          clusters: [
-            'cl0',
-            'cl1',
-            'cl2',
-            'cl3',
-            'cl4'
-          ]
-        }
-      },
-      status: {
-        auditlogs: {
-          znodeReady: true,
-          solrCollectionReady: true,
-          solrAliasReady: false,
-          configurationUploaded: true
-        },
-        servicelogs: {
-          znodeReady: true,
-          solrCollectionReady: true,
-          configurationUploaded: true
-        },
-        userconfig: {
-          znodeReady: true,
-          solrCollectionReady: true,
-          configurationUploaded: true
-        }
-      },
-      userconfig: {
-        userConfigList: [
-          {
-            id: 'c0',
-            userName: 'admin',
-            filtername: 'service',
-            values: 'hdfs',
-            shareNameList: [
-              's0',
-              's1'
-            ],
-            rowType: 'history'
-          },
-          {
-            id: 'c0',
-            userName: 'user',
-            filtername: 'component',
-            values: 'namenode',
-            shareNameList: [
-              's2',
-              's3'
-            ],
-            rowType: 'history'
-          }
-        ],
-        filters: {
-          filter0: {
-            label: 'filter0',
-            hosts: [
-              'h0',
-              'h1'
-            ],
-            defaultLevels: [
-              'l0',
-              'l1'
-            ],
-            overrideLevels: [
-              'l2',
-              'l3'
-            ],
-            expiryTime: '2017-05-29T11:30:22.531Z'
-          },
-          filter1: {
-            label: 'filter1',
-            hosts: [
-              'h1',
-              'h2'
-            ],
-            defaultLevels: [
-              'l4',
-              'l5'
-            ],
-            overrideLevels: [
-              'l6',
-              'l7'
-            ],
-            expiryTime: '2017-05-30T11:30:22.531Z'
-          }
-        },
-        names: []
-      }
-    }
-  }
-};
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/src/main.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/main.ts b/ambari-logsearch/ambari-logsearch-web-new/src/main.ts
index dfabf38..5e76e0d 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/main.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/main.ts
@@ -19,8 +19,8 @@
 import {enableProdMode} from '@angular/core';
 import {platformBrowserDynamic} from '@angular/platform-browser-dynamic';
 
-import {AppModule} from './app/app.module';
-import {environment} from './environments/environment';
+import {AppModule} from '@app/app.module';
+import {environment} from '@envs/environment';
 
 if (environment.production) {
   enableProdMode();
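
The import rewrite above relies on the @envs/* path alias that the tsconfig.json hunk just below introduces alongside the existing @app/* mapping; both are resolved by the TypeScript compiler against the configured baseUrl. A hedged illustration, assuming baseUrl points at src (the baseUrl line itself is outside the hunk shown):

// With "@envs/*" -> "environments/*" and baseUrl = "src" (an assumption,
// since the baseUrl line is not part of this hunk), these imports resolve
// to the same file, src/environments/environment.ts:
import { environment } from '@envs/environment';               // alias form used in main.ts
// import { environment } from './environments/environment';   // previous relative form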

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9338e61/ambari-logsearch/ambari-logsearch-web-new/tsconfig.json
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/tsconfig.json b/ambari-logsearch/ambari-logsearch-web-new/tsconfig.json
index 12163cb..fc7c1f7 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/tsconfig.json
+++ b/ambari-logsearch/ambari-logsearch-web-new/tsconfig.json
@@ -6,6 +6,9 @@
     "paths": {
       "@app/*": [
         "app/*"
+      ],
+      "@envs/*": [
+        "environments/*"
       ]
     },
     "sourceMap": true,


[31/31] ambari git commit: Merge branch 'trunk' into branch-feature-logsearch-ui

Posted by ab...@apache.org.
Merge branch 'trunk' into branch-feature-logsearch-ui


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0256fb7f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0256fb7f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0256fb7f

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 0256fb7f76635d10d56020ae9bf30680795fe2fd
Parents: c9338e6 63186bf
Author: ababiichuk <ab...@hortonworks.com>
Authored: Fri Jul 14 15:28:54 2017 +0300
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Fri Jul 14 15:28:54 2017 +0300

----------------------------------------------------------------------
 .../python/ambari_commons/get_ambari_version.py |   44 +
 .../libraries/functions/conf_select.py          |   56 +-
 .../libraries/functions/stack_features.py       |   41 +-
 .../libraries/functions/stack_tools.py          |   39 +
 .../libraries/script/script.py                  |   19 +-
 .../api/ShipperConfigElementDescription.java    |   59 +
 .../api/ShipperConfigTypeDescription.java       |   44 +
 .../model/inputconfig/impl/ConditionsImpl.java  |   13 +
 .../model/inputconfig/impl/FieldsImpl.java      |   14 +
 .../inputconfig/impl/FilterDescriptorImpl.java  |   51 +
 .../impl/FilterGrokDescriptorImpl.java          |   24 +
 .../impl/FilterKeyValueDescriptorImpl.java      |   28 +
 .../model/inputconfig/impl/InputConfigImpl.java |   18 +
 .../inputconfig/impl/InputDescriptorImpl.java   |  101 ++
 .../impl/InputFileBaseDescriptorImpl.java       |   27 +
 .../impl/InputS3FileDescriptorImpl.java         |   16 +
 .../impl/MapAnonymizeDescriptorImpl.java        |   21 +-
 .../inputconfig/impl/MapDateDescriptorImpl.java |   20 +-
 .../impl/MapFieldCopyDescriptorImpl.java        |   14 +-
 .../impl/MapFieldDescriptorImpl.java            |   33 +
 .../impl/MapFieldNameDescriptorImpl.java        |   14 +-
 .../impl/MapFieldValueDescriptorImpl.java       |   20 +-
 .../inputconfig/impl/PostMapValuesAdapter.java  |    2 +-
 .../ambari-logsearch-logfeeder/docs/filter.md   |    4 +-
 .../ambari-logsearch-logfeeder/docs/input.md    |   10 +-
 .../docs/postMapValues.md                       |    2 +-
 .../logfeeder/common/LogEntryParseTester.java   |    2 +-
 .../ambari/logfeeder/mapper/MapperDate.java     |   42 +-
 .../ambari/logfeeder/filter/FilterJSONTest.java |   12 +-
 .../common/ShipperConfigDescriptionStorage.java |   67 +
 .../ambari/logsearch/doc/DocConstants.java      |    1 +
 .../ambari/logsearch/manager/InfoManager.java   |    9 +
 .../response/ShipperConfigDescriptionData.java  |   52 +
 .../ambari/logsearch/rest/InfoResource.java     |   10 +
 .../server/api/query/JpaPredicateVisitor.java   |    8 +-
 .../controller/ActionExecutionContext.java      |   28 +
 .../controller/AmbariActionExecutionHelper.java |   27 +-
 .../controller/DeleteIdentityHandler.java       |   77 +-
 .../server/controller/KerberosHelper.java       |    2 +-
 .../server/controller/KerberosHelperImpl.java   |    5 +-
 .../BlueprintConfigurationProcessor.java        |   59 +-
 .../ClusterStackVersionResourceProvider.java    |  163 ++-
 .../utilities/KerberosIdentityCleaner.java      |   88 +-
 .../utilities/RemovableIdentities.java          |  145 ++
 .../controller/utilities/UsedIdentities.java    |  101 ++
 .../ServiceComponentUninstalledEvent.java       |    6 +
 .../server/events/ServiceRemovedEvent.java      |   29 +-
 .../ambari/server/orm/dao/ClusterDAO.java       |   15 +
 .../orm/entities/ClusterConfigEntity.java       |    3 +
 .../upgrades/UpgradeUserKerberosDescriptor.java |  142 +-
 .../org/apache/ambari/server/state/Cluster.java |    7 +
 .../ambari/server/state/ConfigHelper.java       |   32 +
 .../apache/ambari/server/state/ServiceImpl.java |   14 +-
 .../ambari/server/state/UpgradeContext.java     |   16 +-
 .../server/state/cluster/ClusterImpl.java       |    9 +
 .../AbstractKerberosDescriptorContainer.java    |   12 +
 .../kerberos/KerberosComponentDescriptor.java   |   15 -
 .../kerberos/KerberosIdentityDescriptor.java    |   14 +-
 .../ambari/server/topology/AmbariContext.java   |   46 +-
 .../server/upgrade/UpgradeCatalog252.java       |   61 +
 .../HBASE/0.96.0.2.0/package/scripts/hbase.py   |   12 +-
 .../0.96.0.2.0/package/scripts/params_linux.py  |    3 +
 .../package/alerts/alert_hive_metastore.py      |   11 +-
 .../package/alerts/alert_llap_app_status.py     |   12 +-
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |    3 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |    6 +
 .../0.12.0.2.0/package/scripts/service_check.py |    3 +-
 .../package/alerts/alert_check_oozie_server.py  |    8 +-
 .../SPARK/1.2.1/package/scripts/params.py       |   11 +-
 .../SPARK/1.2.1/package/scripts/setup_spark.py  |    6 +-
 .../1.2.1/package/scripts/spark_service.py      |    6 +-
 .../0.6.0.2.5/configuration/zeppelin-env.xml    |    2 +-
 .../resources/host_scripts/alert_disk_space.py  |   10 +-
 .../host_scripts/alert_version_select.py        |   16 +-
 .../HDP/2.0.6/configuration/cluster-env.xml     |   16 +-
 .../HDP/2.0.6/properties/stack_features.json    |  852 +++++------
 .../HDP/2.0.6/properties/stack_tools.json       |   16 +-
 .../stacks/HDP/2.5/services/stack_advisor.py    |    5 +-
 .../resources/stacks/HDP/2.6/repos/repoinfo.xml |    6 +-
 .../services/HIVE/configuration/hive-env.xml    |   78 +-
 .../HIVE/configuration/hive-interactive-env.xml |   62 +-
 .../services/HIVE/configuration/hive-site.xml   |   35 +
 .../stacks/HDP/2.6/services/HIVE/kerberos.json  |  151 ++
 .../stacks/HDP/2.6/services/YARN/kerberos.json  |    2 +-
 .../PERF/1.0/configuration/cluster-env.xml      |   16 +-
 .../PERF/1.0/properties/stack_features.json     |   38 +-
 .../stacks/PERF/1.0/properties/stack_tools.json |   16 +-
 .../src/main/resources/stacks/stack_advisor.py  |   19 +-
 .../BlueprintConfigurationProcessorTest.java    |   41 +-
 ...ClusterStackVersionResourceProviderTest.java |    4 +-
 .../utilities/KerberosIdentityCleanerTest.java  |  102 +-
 .../server/orm/dao/ServiceConfigDAOTest.java    |   12 +
 .../UpgradeUserKerberosDescriptorTest.java      |   59 +-
 .../ClusterConfigurationRequestTest.java        |   60 +-
 .../src/test/python/TestStackFeature.py         |   44 +-
 .../common-services/configs/hawq_default.json   |    6 +-
 .../python/host_scripts/TestAlertDiskSpace.py   |   16 +-
 .../2.0.6/HBASE/test_phoenix_queryserver.py     |   23 -
 .../stacks/2.0.6/HIVE/test_hive_server.py       |    2 +
 .../stacks/2.0.6/YARN/test_historyserver.py     |   21 +-
 .../test/python/stacks/2.0.6/configs/nn_eu.json |    2 +-
 .../stacks/2.0.6/configs/nn_eu_standby.json     |    2 +-
 .../2.1/configs/hive-metastore-upgrade.json     |    2 +-
 .../python/stacks/2.2/configs/knox_upgrade.json |    2 +-
 .../stacks/2.5/common/test_stack_advisor.py     |  150 +-
 .../2.5/configs/ranger-admin-default.json       |  990 ++++++-------
 .../2.5/configs/ranger-admin-secured.json       | 1108 +++++++--------
 .../stacks/2.5/configs/ranger-kms-default.json  | 1158 +++++++--------
 .../stacks/2.5/configs/ranger-kms-secured.json  | 1320 +++++++++---------
 .../2.6/configs/ranger-admin-default.json       |  953 +++++++------
 .../2.6/configs/ranger-admin-secured.json       | 1066 +++++++-------
 .../src/test/python/stacks/utils/RMFTestCase.py |    8 +-
 ambari-web/app/controllers/main/service.js      |   13 +-
 ambari-web/app/utils/ajax/ajax.js               |   22 +
 .../test/controllers/main/service_test.js       |    4 +-
 .../hive20/src/main/resources/ui/package.json   |    1 +
 .../src/main/resources/ui/package.json          |    1 +
 117 files changed, 6255 insertions(+), 4411 deletions(-)
----------------------------------------------------------------------



[17/31] ambari git commit: AMBARI-21441. Discrepancy in the OS name in ambari for PPC (aonishuk)

Posted by ab...@apache.org.
AMBARI-21441. Discrepancy in the OS name in ambari for PPC (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/51e62ad5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/51e62ad5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/51e62ad5

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 51e62ad578ab1f21a163c08c2bff6dec3fb24f7c
Parents: 639f452
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Jul 11 13:24:19 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Jul 11 13:24:19 2017 +0300

----------------------------------------------------------------------
 .../src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml       | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/51e62ad5/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
index 23441f5..ff132aa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
@@ -31,15 +31,15 @@
       <unique>false</unique>
     </repo>
   </os>
-  <os family="redhat-ppc6">
+  <os family="redhat-ppc7">
     <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.6.0.3</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.6.0.3</baseurl>
       <repoid>HDP-2.6</repoid>
       <reponame>HDP</reponame>
       <unique>true</unique>
     </repo>
     <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ppc64le</baseurl>
       <repoid>HDP-UTILS-1.1.0.21</repoid>
       <reponame>HDP-UTILS</reponame>
       <unique>false</unique>


[21/31] ambari git commit: AMBARI-21435 Add python functions to get the Ambari version of the agent (mgergely)

Posted by ab...@apache.org.
AMBARI-21435 Add python functions to get the Ambari version of the agent (mgergely)

Change-Id: I9481b32babac92ad5d7496fe4abb208eefaac922


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/383b8c7d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/383b8c7d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/383b8c7d

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 383b8c7d83545bc4cb21058794e91b5a4aece425
Parents: e767aa4
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Wed Jul 12 13:22:33 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Wed Jul 12 13:22:33 2017 +0200

----------------------------------------------------------------------
 .../python/ambari_commons/get_ambari_version.py | 44 ++++++++++++++++++++
 1 file changed, 44 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/383b8c7d/ambari-common/src/main/python/ambari_commons/get_ambari_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/ambari_commons/get_ambari_version.py b/ambari-common/src/main/python/ambari_commons/get_ambari_version.py
new file mode 100644
index 0000000..c8c5336
--- /dev/null
+++ b/ambari-common/src/main/python/ambari_commons/get_ambari_version.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import ConfigParser
+from resource_management.core.logger import Logger
+
+"""
+returns the ambari version on an agent host
+"""
+def get_ambari_version_agent():
+  ambari_version = None
+  AMBARI_AGENT_CONF = '/etc/ambari-agent/conf/ambari-agent.ini'
+  if os.path.exists(AMBARI_AGENT_CONF):
+    try:
+      ambari_agent_config = ConfigParser.RawConfigParser()
+      ambari_agent_config.read(AMBARI_AGENT_CONF)
+      data_dir = ambari_agent_config.get('agent', 'prefix')
+      ver_file = os.path.join(data_dir, 'version')
+      with open(ver_file, "r") as f:
+        ambari_version = f.read().strip()
+    except Exception, e:
+      Logger.info('Unable to determine ambari version from the agent version file.')
+      Logger.debug('Exception: %s' % str(e))
+      pass
+    pass
+  return ambari_version
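
A minimal usage sketch of the new helper (assumes it runs on a host where the Ambari agent and its Python libraries are installed; the import path follows the file location above):

  from ambari_commons.get_ambari_version import get_ambari_version_agent

  version = get_ambari_version_agent()
  if version is None:
    print("Ambari agent version file not found or unreadable")
  else:
    print("Ambari agent version: %s" % version)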


[22/31] ambari git commit: AMBARI-21444. Hive warehouse fixes. (vbrodetskyi)

Posted by ab...@apache.org.
AMBARI-21444. Hive warehouse fixes. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/31b9d777
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/31b9d777
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/31b9d777

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 31b9d7774b22f59a4d7120c9836c73a5216fd529
Parents: 383b8c7
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Wed Jul 12 15:35:53 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Wed Jul 12 15:35:53 2017 +0300

----------------------------------------------------------------------
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |  3 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |  2 ++
 .../services/HIVE/configuration/hive-site.xml   | 35 ++++++++++++++++++++
 .../stacks/2.0.6/HIVE/test_hive_server.py       |  2 ++
 4 files changed, 41 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/31b9d777/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
index 36725c3..8e176b6 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
@@ -224,7 +224,8 @@ def setup_hiveserver2():
                          type="directory",
                           action="create_on_execute",
                           owner=params.hive_user,
-                          mode=0777
+                          group=params.user_group,
+                          mode=params.hive_apps_whs_mode
     )
   else:
     Logger.info(format("Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."))

http://git-wip-us.apache.org/repos/asf/ambari/blob/31b9d777/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 078076a..21b3d8b 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -505,6 +505,8 @@ hive_env_sh_template = config['configurations']['hive-env']['content']
 
 hive_hdfs_user_dir = format("/user/{hive_user}")
 hive_hdfs_user_mode = 0755
+#Parameter for custom warehouse directory permissions. Permissions are in octal format and need to be converted to decimal
+hive_apps_whs_mode = int(default('/configurations/hive-site/custom.hive.warehouse.mode', '0777'), 8)
 hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
 whs_dir_protocol = urlparse(hive_apps_whs_dir).scheme
 hive_exec_scratchdir = config['configurations']['hive-site']["hive.exec.scratchdir"]
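
For reference, the permission handling added above is a plain octal-to-integer conversion; a standalone sketch with a hypothetical custom.hive.warehouse.mode value (Ambari's default() helper replaced by a literal):

  custom_mode = '0775'                      # hypothetical custom.hive.warehouse.mode value
  hive_apps_whs_mode = int(custom_mode, 8)  # octal string -> integer: 0775 == 509
  print(oct(hive_apps_whs_mode))            # '0775' on Python 2, '0o775' on Python 3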

http://git-wip-us.apache.org/repos/asf/ambari/blob/31b9d777/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 0000000..a07c16f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<configuration supports_final="true">
+  <property>
+    <name>hive.warehouse.subdir.inherit.perms</name>
+    <value>true</value>
+    <description>Set this to true if table directories should inherit the permissions of the warehouse or database directory instead of being created with permissions derived from dfs umask
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.start.cleanup.scratchdir</name>
+    <value>false</value>
+    <description>To cleanup the hive scratchdir while starting the hive server.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/31b9d777/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index ae2ec86..fc6d14e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -491,6 +491,7 @@ class TestHiveServer(RMFTestCase):
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
+        group = 'hadoop',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/hive',
@@ -703,6 +704,7 @@ class TestHiveServer(RMFTestCase):
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        group = 'hadoop',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/hive',


[18/31] ambari git commit: AMBARI-21432 - Allow Services To Be Stopped During an EU Between Stack Vendors (jonathanhurley)

Posted by ab...@apache.org.
AMBARI-21432 - Allow Services To Be Stopped During an EU Between Stack Vendors (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/880853a6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/880853a6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/880853a6

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 880853a665dc07c68ec5f05975e01eba7bb561ee
Parents: 51e62ad
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Sun Jul 9 18:18:22 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Jul 11 10:31:12 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/conf_select.py          | 56 +++++---------------
 .../2.0.6/HBASE/test_phoenix_queryserver.py     | 23 --------
 .../stacks/2.0.6/YARN/test_historyserver.py     | 21 +-------
 3 files changed, 15 insertions(+), 85 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/880853a6/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index 3e01cf6..4f11633 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -399,7 +399,6 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
   stack_root = Script.get_stack_root()
   stack_version = Script.get_stack_version()
   version = None
-  allow_setting_conf_select_symlink = False
 
   if not Script.in_stack_upgrade():
     # During normal operation, the HDP stack must be 2.3 or higher
@@ -413,27 +412,10 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
 
       if not os.path.islink(hadoop_conf_dir) and stack_name and version:
         version = str(version)
-        allow_setting_conf_select_symlink = True
   else:
-    # During an upgrade/downgrade, which can be a Rolling or Express Upgrade, need to calculate it based on the version
-    '''
-    Whenever upgrading to HDP 2.2, or downgrading back to 2.2, need to use /etc/hadoop/conf
-    Whenever upgrading to HDP 2.3, or downgrading back to 2.3, need to use a versioned hadoop conf dir
-
-    Type__|_Source_|_Target_|_Direction_____________|_Comment_____________________________________________________________
-    Normal|        | 2.2    |                       | Use /etc/hadoop/conf
-    Normal|        | 2.3    |                       | Use /etc/hadoop/conf, which should be a symlink to <stack-root>/current/hadoop-client/conf
-    EU    | 2.1    | 2.3    | Upgrade               | Use versioned <stack-root>/current/hadoop-client/conf
-          |        |        | No Downgrade Allowed  | Invalid
-    EU/RU | 2.2    | 2.2.*  | Any                   | Use <stack-root>/current/hadoop-client/conf
-    EU/RU | 2.2    | 2.3    | Upgrade               | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
-          |        |        | Downgrade             | Use <stack-root>/current/hadoop-client/conf
-    EU/RU | 2.3    | 2.3.*  | Any                   | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
-    '''
-
     # The "stack_version" is the desired stack, e.g., 2.2 or 2.3
     # In an RU, it is always the desired stack, and doesn't change even during the Downgrade!
-    # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is 
+    # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is
     # rm /etc/[component]/conf and then mv /etc/[component]/conf.backup /etc/[component]/conf
     if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version):
       hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")
@@ -442,13 +424,16 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
       # is the version upgrading/downgrading to.
       stack_info = stack_select._get_upgrade_stack()
 
-      if stack_info is not None:
-        stack_name = stack_info[0]
-        version = stack_info[1]
-      else:
-        raise Fail("Unable to get parameter 'version'")
-      
-      Logger.info("In the middle of a stack upgrade/downgrade for Stack {0} and destination version {1}, determining which hadoop conf dir to use.".format(stack_name, version))
+      if stack_info is None:
+        raise Fail("Unable to retrieve the upgrade/downgrade stack information from the request")
+
+      stack_name = stack_info[0]
+      version = stack_info[1]
+
+      Logger.info(
+        "An upgrade/downgrade for {0}-{1} is in progress, determining which hadoop conf dir to use.".format(
+          stack_name, version))
+
       # This is the version either upgrading or downgrading to.
       if version and check_stack_feature(StackFeature.CONFIG_VERSIONING, version):
         # Determine if <stack-selector-tool> has been run and if not, then use the current
@@ -465,21 +450,6 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
         hadoop_conf_dir = os.path.join(stack_root, version, "hadoop", "conf")
         Logger.info("Hadoop conf dir: {0}".format(hadoop_conf_dir))
 
-        allow_setting_conf_select_symlink = True
-
-  if allow_setting_conf_select_symlink:
-    # If not in the middle of an upgrade and on HDP 2.3 or higher, or if
-    # upgrading stack to version 2.3.0.0 or higher (which may be upgrade or downgrade), then consider setting the
-    # symlink for /etc/hadoop/conf.
-    # If a host does not have any HDFS or YARN components (e.g., only ZK), then it will not contain /etc/hadoop/conf
-    # Therefore, any calls to <conf-selector-tool> will fail.
-    # For that reason, if the hadoop conf directory exists, then make sure it is set.
-    if os.path.exists(hadoop_conf_dir):
-      conf_selector_name = stack_tools.get_stack_tool_name(stack_tools.CONF_SELECTOR_NAME)
-      Logger.info("The hadoop conf dir {0} exists, will call {1} on it for version {2}".format(
-              hadoop_conf_dir, conf_selector_name, version))
-      select(stack_name, "hadoop", version)
-
   Logger.info("Using hadoop conf dir: {0}".format(hadoop_conf_dir))
   return hadoop_conf_dir
 
@@ -587,7 +557,7 @@ def convert_conf_directories_to_symlinks(package, version, dirs, skip_existing_l
 
 
   # <stack-root>/current/[component] is already set to to the correct version, e.g., <stack-root>/[version]/[component]
-  
+
   select(stack_name, package, version, ignore_errors = True)
 
   # Symlink /etc/[component]/conf to /etc/[component]/conf.backup
@@ -702,4 +672,4 @@ def _get_backup_conf_directory(old_conf):
   """
   old_parent = os.path.abspath(os.path.join(old_conf, os.pardir))
   backup_dir = os.path.join(old_parent, "conf.backup")
-  return backup_dir
+  return backup_dir
\ No newline at end of file
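
To illustrate the upgrade branch kept above, the versioned hadoop conf dir is a simple path join of the stack root and the target version; a standalone sketch with hypothetical values (the real values come from Script.get_stack_root() and the upgrade request):

  import os

  stack_root = "/usr/hdp"       # hypothetical stack root
  version = "2.3.0.0-1234"      # hypothetical upgrade/downgrade target version
  hadoop_conf_dir = os.path.join(stack_root, version, "hadoop", "conf")
  print(hadoop_conf_dir)        # /usr/hdp/2.3.0.0-1234/hadoop/conf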

http://git-wip-us.apache.org/repos/asf/ambari/blob/880853a6/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
index 60022e1..1b324d4 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
@@ -76,8 +76,6 @@ class TestPhoenixQueryServer(RMFTestCase):
       call_mocks = [(0, None, None)]
     )
 
-    self.assert_call_to_get_hadoop_conf_dir()
-
     self.assertResourceCalled('Execute',
       '/usr/hdp/current/phoenix-server/bin/queryserver.py stop',
       environment = {'JAVA_HOME':'/usr/jdk64/jdk1.8.0_40',
@@ -134,8 +132,6 @@ class TestPhoenixQueryServer(RMFTestCase):
       call_mocks = [(0, None, None)]
     )
 
-    self.assert_call_to_get_hadoop_conf_dir()
-
     self.assertResourceCalled('Execute',
       '/usr/hdp/current/phoenix-server/bin/queryserver.py stop',
       environment = {'JAVA_HOME':'/usr/jdk64/jdk1.8.0_40',
@@ -217,18 +213,7 @@ class TestPhoenixQueryServer(RMFTestCase):
 
     self.assertNoMoreResources()
 
-  def assert_call_to_get_hadoop_conf_dir(self):
-    # From call to conf_select.get_hadoop_conf_dir()
-    self.assertResourceCalled("Execute", ("cp", "-R", "-p", "/etc/hadoop/conf", "/etc/hadoop/conf.backup"),
-                              not_if = "test -e /etc/hadoop/conf.backup",
-                              sudo = True)
-    self.assertResourceCalled("Directory", "/etc/hadoop/conf",
-                              action = ["delete"])
-    self.assertResourceCalled("Link", "/etc/hadoop/conf", to="/etc/hadoop/conf.backup")
-
   def assert_configure_default(self):
-    self.assert_call_to_get_hadoop_conf_dir()
-
     self.assertResourceCalled('Directory', '/etc/hbase',
       mode = 0755
     )
@@ -330,8 +315,6 @@ class TestPhoenixQueryServer(RMFTestCase):
     )
 
   def assert_configure_secured(self):
-    self.assert_call_to_get_hadoop_conf_dir()
-
     self.assertResourceCalled('Directory', '/etc/hbase',
       mode = 0755
     )
@@ -459,10 +442,4 @@ class TestPhoenixQueryServer(RMFTestCase):
         cd_access = 'a',
     )
     self.assertResourceCalledIgnoreEarlier('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'phoenix-server', '2.3.0.0-1234'), sudo=True)
-
-    self.assertResourceCalled("Execute", ("cp", "-R", "-p", "/etc/hadoop/conf", "/etc/hadoop/conf.backup"),
-                              not_if = "test -e /etc/hadoop/conf.backup",
-                              sudo = True)
-    self.assertResourceCalled("Directory", "/etc/hadoop/conf", action = ["delete"])
-    self.assertResourceCalled("Link", "/etc/hadoop/conf", to="/etc/hadoop/conf.backup")
     self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/880853a6/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index 53d16fd..b29cfb5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -749,15 +749,6 @@ class TestHistoryServer(RMFTestCase):
                               group = 'hadoop',
                               )
 
-  def assert_call_to_get_hadoop_conf_dir(self):
-    # From call to conf_select.get_hadoop_conf_dir()
-    self.assertResourceCalled("Execute", ("cp", "-R", "-p", "/etc/hadoop/conf", "/etc/hadoop/conf.backup"),
-                              not_if = "test -e /etc/hadoop/conf.backup",
-                              sudo = True)
-    self.assertResourceCalled("Directory", "/etc/hadoop/conf",
-                              action = ["delete"])
-    self.assertResourceCalled("Link", "/etc/hadoop/conf", to="/etc/hadoop/conf.backup")
-
   @patch.object(functions, "get_stack_version", new = MagicMock(return_value="2.3.0.0-1234"))
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
   def test_pre_upgrade_restart_23(self, copy_to_hdfs_mock):
@@ -783,8 +774,6 @@ class TestHistoryServer(RMFTestCase):
     self.assertTrue(call("slider", "hadoop", "hdfs", skip=False) in copy_to_hdfs_mock.call_args_list)
 
     # From call to conf_select.get_hadoop_conf_dir()
-    self.assert_call_to_get_hadoop_conf_dir()
-    self.assert_call_to_get_hadoop_conf_dir()
 
     self.assertResourceCalled('HdfsResource', None,
         immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
@@ -800,11 +789,5 @@ class TestHistoryServer(RMFTestCase):
 
     self.assertNoMoreResources()
 
-    self.assertEquals(5, mocks_dict['call'].call_count)
-    self.assertEquals(5, mocks_dict['checked_call'].call_count)
-    self.assertEquals(
-      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
-       mocks_dict['checked_call'].call_args_list[0][0][0])
-    self.assertEquals(
-      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
-       mocks_dict['call'].call_args_list[0][0][0])
+    self.assertEquals(1, mocks_dict['call'].call_count)
+    self.assertEquals(1, mocks_dict['checked_call'].call_count)


[08/31] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
index 535b9d9..62562f8 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
@@ -1,4 +1,14 @@
 {
-  "stack_selector": ["distro-select", "/usr/bin/distro-select", "distro-select"],
-  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}
\ No newline at end of file
+  "PERF": {
+    "stack_selector": [
+      "distro-select",
+      "/usr/bin/distro-select",
+      "distro-select"
+    ],
+    "conf_selector": [
+      "conf-select",
+      "/usr/bin/conf-select",
+      "conf-select"
+    ]
+  }
+}
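
A minimal sketch of reading the stack-namespaced layout above with the plain json module (in Ambari the lookup goes through the stack_tools helper functions; this only shows the data shape):

  import json

  stack_tools_json = '''{
    "PERF": {
      "stack_selector": ["distro-select", "/usr/bin/distro-select", "distro-select"],
      "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
    }
  }'''

  tools_for_stack = json.loads(stack_tools_json)["PERF"]
  selector_name, selector_path = tools_for_stack["stack_selector"][:2]
  print(selector_name, selector_path)   # distro-select /usr/bin/distro-select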

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index ca579ea..bade238 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -46,9 +46,11 @@ import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.controller.StackConfigurationResponse;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.topology.AdvisedConfiguration;
@@ -66,6 +68,7 @@ import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyException;
 import org.apache.ambari.server.topology.TopologyRequest;
 import org.apache.commons.lang.StringUtils;
+import org.easymock.EasyMock;
 import org.easymock.EasyMockRule;
 import org.easymock.EasyMockSupport;
 import org.easymock.Mock;
@@ -95,6 +98,10 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
   private static final Configuration EMPTY_CONFIG = new Configuration(Collections.<String, Map<String, String>>emptyMap(), Collections.<String, Map<String, Map<String, String>>>emptyMap());
   private final Map<String, Collection<String>> serviceComponents = new HashMap<>();
+  private final Map<String, Map<String, String>> stackProperties = new HashMap<>();
+
+  private final String STACK_NAME = "testStack";
+  private final String STACK_VERSION = "1";
 
   @Rule
   public EasyMockRule mocks = new EasyMockRule(this);
@@ -129,13 +136,16 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
   @Mock
   private TopologyRequest topologyRequestMock;
 
+  @Mock(type = MockType.NICE)
+  private ConfigHelper configHelper;
+
   @Before
   public void init() throws Exception {
     expect(bp.getStack()).andReturn(stack).anyTimes();
     expect(bp.getName()).andReturn("test-bp").anyTimes();
 
-    expect(stack.getName()).andReturn("testStack").anyTimes();
-    expect(stack.getVersion()).andReturn("1").anyTimes();
+    expect(stack.getName()).andReturn(STACK_NAME).atLeastOnce();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).atLeastOnce();
     // return false for all components since for this test we don't care about the value
     expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes();
     expect(stack.getConfigurationPropertiesWithMetadata(anyObject(String.class), anyObject(String.class))).andReturn(Collections.<String, Stack.ConfigProperty>emptyMap()).anyTimes();
@@ -225,6 +235,11 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
     Set<String> emptySet = Collections.emptySet();
     expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
+
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
     expect(ambariContext.isClusterKerberosEnabled(1)).andReturn(true).once();
     expect(ambariContext.getClusterName(1L)).andReturn("clusterName").anyTimes();
     PowerMock.mockStatic(AmbariServer.class);
@@ -234,14 +249,14 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(controller.getKerberosHelper()).andReturn(kerberosHelper).anyTimes();
     expect(controller.getClusters()).andReturn(clusters).anyTimes();
     expect(kerberosHelper.getKerberosDescriptor(cluster)).andReturn(kerberosDescriptor).anyTimes();
-    Set<String> properties = new HashSet<String>();
+    Set<String> properties = new HashSet<>();
     properties.add("core-site/hadoop.security.auth_to_local");
     expect(kerberosDescriptor.getAllAuthToLocalProperties()).andReturn(properties).anyTimes();
   }
 
   @After
   public void tearDown() {
-    reset(bp, serviceInfo, stack, ambariContext);
+    reset(bp, serviceInfo, stack, ambariContext, configHelper);
   }
 
   @Test
@@ -6322,13 +6337,16 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
     topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
     BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
     reset(stack);
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
     expect(stack.getConfiguration(bp.getServices())).andReturn(createStackDefaults()).anyTimes();
 
     Set<String> emptySet = Collections.emptySet();
     expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
-
     replay(stack);
+
     // WHEN
     Set<String> configTypeUpdated = configProcessor.doUpdateForClusterCreate();
     // THEN
@@ -6379,13 +6397,17 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
     topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
     BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
     reset(stack);
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
     expect(stack.getConfiguration(bp.getServices())).andReturn(createStackDefaults()).anyTimes();
 
     Set<String> emptySet = Collections.emptySet();
     expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
 
     replay(stack);
+
     // WHEN
     configProcessor.doUpdateForClusterCreate();
     // THEN
@@ -8050,6 +8072,10 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
   @Test
   public void testValuesTrimming() throws Exception {
     reset(stack);
+
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
+
     Map<String, Map<String, String>> properties = new HashMap<>();
 
     Map<String, String> hdfsSite = new HashMap<>();
@@ -8073,6 +8099,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
       new StackConfigurationResponse(null, null, null, null, "hdfs-site", null, Collections.singleton(PropertyInfo.PropertyType.PASSWORD), null, null, null)));
     propertyConfigs.put("test.host", new Stack.ConfigProperty(
       new StackConfigurationResponse(null, null, null, null, "hdfs-site", null, null, null, valueAttributesInfoHost, null)));
+
     expect(stack.getServiceForConfigType("hdfs-site")).andReturn("HDFS").anyTimes();
     expect(stack.getConfigurationPropertiesWithMetadata("HDFS", "hdfs-site")).andReturn(propertyConfigs).anyTimes();
 
@@ -8144,7 +8171,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     throws InvalidTopologyException {
 
 
-    replay(stack, serviceInfo, ambariContext, controller, kerberosHelper, kerberosDescriptor, clusters, cluster);
+    replay(stack, serviceInfo, ambariContext, configHelper, controller, kerberosHelper, kerberosDescriptor, clusters, cluster);
 
     Map<String, HostGroupInfo> hostGroupInfo = new HashMap<>();
     Collection<String> allServices = new HashSet<>();
@@ -8207,7 +8234,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
       this.name = name;
       this.components = components;
       this.hosts = hosts;
-      this.configuration = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
+      configuration = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
         Collections.<String, Map<String, Map<String, String>>>emptyMap());
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
index 32a5358..39aee82 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
@@ -1414,8 +1414,8 @@ public class ClusterStackVersionResourceProviderTest {
     expect(cluster.getClusterId()).andReturn(1L).anyTimes();
     expect(cluster.getHosts()).andReturn(hostsForCluster.values()).atLeastOnce();
     expect(cluster.getServices()).andReturn(serviceMap).anyTimes();
-    expect(cluster.getServiceComponentHosts(anyObject(String.class))).andReturn(
-        serviceComponentHosts).anyTimes();
+    expect(cluster.getServiceComponentHosts(anyObject(String.class))).andReturn(serviceComponentHosts).anyTimes();
+    expect(cluster.getCurrentStackVersion()).andReturn(stackId).atLeastOnce();
 
     expect(repositoryVersionDAOMock.findByStackAndVersion(anyObject(StackId.class),
         anyObject(String.class))).andReturn(repoVersionEntity);

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
index 8b08dc4..5535256 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
@@ -50,6 +50,8 @@ import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.serveraction.kerberos.KerberosInvalidConfigurationException;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.StackId;
 import org.easymock.Capture;
 import org.easymock.CaptureType;
 import org.easymock.EasyMock;
@@ -103,6 +105,13 @@ public class ClusterConfigurationRequestTest {
   @Mock(type = MockType.NICE)
   private KerberosHelper kerberosHelper;
 
+  @Mock(type = MockType.NICE)
+  private ConfigHelper configHelper;
+
+  private final String STACK_NAME = "testStack";
+  private final String STACK_VERSION = "1";
+  private final Map<String, Map<String, String>> stackProperties = new HashMap<>();
+
   /**
    * testConfigType config type should be in updatedConfigTypes, as no custom property in Blueprint
    * ==> Kerberos config property should be updated
@@ -221,6 +230,8 @@ public class ClusterConfigurationRequestTest {
     expect(clusters.getCluster("testCluster")).andReturn(cluster).anyTimes();
 
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
     expect(stack.getServiceForConfigType("testConfigType")).andReturn("KERBEROS").anyTimes();
     expect(stack.getAllConfigurationTypes(anyString())).andReturn(Collections.singletonList("testConfigType")
     ).anyTimes();
@@ -246,6 +257,7 @@ public class ClusterConfigurationRequestTest {
     expect(blueprint.getComponents("KERBEROS")).andReturn(kerberosComponents).anyTimes();
     expect(blueprint.getComponents("ZOOKEPER")).andReturn(zookeeperComponents).anyTimes();
 
+    expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(blueprint.isValidConfigType("testConfigType")).andReturn(true).anyTimes();
@@ -256,10 +268,14 @@ public class ClusterConfigurationRequestTest {
     expect(topology.getHostGroupsForComponent(anyString())).andReturn(Collections.<String>emptyList())
       .anyTimes();
 
-      expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+    expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
     expect(ambariContext.createConfigurationRequests(EasyMock.<Map<String, Object>>anyObject())).andReturn(Collections
       .<ConfigurationRequest>emptyList()).anyTimes();
 
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
     if (kerberosConfig == null) {
       kerberosConfig = new HashMap<>();
       Map<String, String> properties = new HashMap<>();
@@ -277,15 +293,14 @@ public class ClusterConfigurationRequestTest {
       (captureUpdatedConfigTypes));
     expectLastCall();
 
-    PowerMock.replay(stack, blueprint, topology, controller, clusters, kerberosHelper, ambariContext,
-      AmbariContext
-        .class);
+    PowerMock.replay(stack, blueprint, topology, controller, clusters, kerberosHelper,
+        ambariContext, AmbariContext.class, configHelper);
 
     ClusterConfigurationRequest clusterConfigurationRequest = new ClusterConfigurationRequest(
       ambariContext, topology, false, stackAdvisorBlueprintProcessor, true);
     clusterConfigurationRequest.process();
 
-    verify(blueprint, topology, ambariContext, controller, kerberosHelper);
+    verify(blueprint, topology, ambariContext, controller, kerberosHelper, configHelper);
 
 
     String clusterName = captureClusterName.getValue();
@@ -308,8 +323,9 @@ public class ClusterConfigurationRequestTest {
     expect(clusters.getCluster("testCluster")).andReturn(cluster).anyTimes();
 
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
-    expect(stack.getAllConfigurationTypes(anyString())).andReturn(Collections.singletonList("testConfigType")
-    ).anyTimes();
+    expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+    expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
+    expect(stack.getAllConfigurationTypes(anyString())).andReturn(Collections.<String>singletonList("testConfigType")).anyTimes();
     expect(stack.getExcludedConfigurationTypes(anyString())).andReturn(Collections.<String>emptySet()).anyTimes();
     expect(stack.getConfigurationPropertiesWithMetadata(anyString(), anyString())).andReturn(Collections.<String,
       Stack.ConfigProperty>emptyMap()).anyTimes();
@@ -331,25 +347,29 @@ public class ClusterConfigurationRequestTest {
     expect(blueprint.getComponents("KERBEROS")).andReturn(kerberosComponents).anyTimes();
     expect(blueprint.getComponents("ZOOKEPER")).andReturn(zookeeperComponents).anyTimes();
 
+    expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getConfiguration()).andReturn(stackConfig).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(Collections.<String, HostGroupInfo>emptyMap()).anyTimes();
     expect(topology.getClusterId()).andReturn(Long.valueOf(1)).anyTimes();
+
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
     expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
     expect(ambariContext.createConfigurationRequests(EasyMock.<Map<String, Object>>anyObject())).andReturn(Collections
       .<ConfigurationRequest>emptyList()).anyTimes();
 
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
 
     PowerMock.replay(stack, blueprint, topology, controller, clusters, ambariContext,
-      AmbariContext
-        .class);
+        AmbariContext.class, configHelper);
 
     ClusterConfigurationRequest clusterConfigurationRequest = new ClusterConfigurationRequest(
       ambariContext, topology, false, stackAdvisorBlueprintProcessor);
     clusterConfigurationRequest.process();
 
-    verify(blueprint, topology, ambariContext, controller);
+    verify(blueprint, topology, ambariContext, controller, configHelper);
 
   }
 
@@ -365,6 +385,7 @@ public class ClusterConfigurationRequestTest {
     hg1.setConfiguration(createConfigurationsForHostGroup());
     hostGroupInfoMap.put("hg1", hg1);
 
+    expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfiguration()).andReturn(configuration).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap);
@@ -377,7 +398,12 @@ public class ClusterConfigurationRequestTest {
     expect(blueprint.isValidConfigType("cluster-env")).andReturn(true).anyTimes();
     expect(blueprint.isValidConfigType("global")).andReturn(true).anyTimes();
 
-    EasyMock.replay(stack, blueprint, topology);
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
+    EasyMock.replay(stack, blueprint, topology, ambariContext, configHelper);
     // WHEN
     new ClusterConfigurationRequest(ambariContext, topology, false, stackAdvisorBlueprintProcessor);
     // THEN
@@ -388,7 +414,7 @@ public class ClusterConfigurationRequestTest {
 
     assertFalse("SPARK service not present in topology host group config thus 'spark-env' config type should be removed from config.", hg1.getConfiguration().getFullAttributes().containsKey("spark-env"));
     assertTrue("HDFS service is present in topology host group config thus 'hdfs-site' config type should be left in the config.", hg1.getConfiguration().getFullAttributes().containsKey("hdfs-site"));
-    verify(stack, blueprint, topology);
+    verify(stack, blueprint, topology, ambariContext, configHelper);
   }
 
   @Test
@@ -409,6 +435,7 @@ public class ClusterConfigurationRequestTest {
     hg1.setConfiguration(createConfigurationsForHostGroup());
     hostGroupInfoMap.put("hg1", hg1);
 
+    expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfiguration()).andReturn(configuration).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap);
@@ -419,7 +446,12 @@ public class ClusterConfigurationRequestTest {
     expect(blueprint.isValidConfigType("cluster-env")).andReturn(true).anyTimes();
     expect(blueprint.isValidConfigType("global")).andReturn(true).anyTimes();
 
-    EasyMock.replay(stack, blueprint, topology);
+    expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+
+    expect(configHelper.getDefaultStackProperties(
+        EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
+
+    EasyMock.replay(stack, blueprint, topology, ambariContext, configHelper);
 
     // When
 
@@ -431,7 +463,7 @@ public class ClusterConfigurationRequestTest {
 
     assertFalse("SPARK service not present in topology host group config thus 'spark-env' config type should be removed from config.", hg1.getConfiguration().getFullAttributes().containsKey("spark-env"));
     assertTrue("HDFS service is present in topology host group config thus 'hdfs-site' config type should be left in the config.", hg1.getConfiguration().getFullAttributes().containsKey("hdfs-site"));
-    verify(stack, blueprint, topology);
+    verify(stack, blueprint, topology, ambariContext, configHelper);
 
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/common-services/configs/hawq_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/common-services/configs/hawq_default.json b/ambari-server/src/test/python/common-services/configs/hawq_default.json
index 79864a9..1b6fafb 100644
--- a/ambari-server/src/test/python/common-services/configs/hawq_default.json
+++ b/ambari-server/src/test/python/common-services/configs/hawq_default.json
@@ -73,7 +73,11 @@
         "cluster-env": {
             "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
-            "user_group": "hadoop"
+            "user_group": "hadoop",
+            "stack_name": "PHD",
+            "stack_root": "{\"PHD\": \"/usr/phd\"}",
+            "stack_tools": "{\n \"PHD\": { \"stack_selector\": [\"phd-select\", \"/usr/bin/phd-select\", \"phd-select\"],\n  \"conf_selector\": [\"conf-select\", \"/usr/bin/conf-select\", \"conf-select\"]\n}\n}",
+            "stack_features": "{\"PHD\":{\"stack_features\":[{\"name\":\"express_upgrade\",\"description\":\"Express upgrade support\",\"min_version\":\"3.0.0.0\"},{\"name\":\"rolling_upgrade\",\"description\":\"Rolling upgrade support\",\"min_version\":\"3.0.0.0\"},{\"name\":\"config_versioning\",\"description\":\"Configurable versions support\",\"min_version\":\"3.0.0.0\"}]\n}\n}"
         }
     },
     "clusterHostInfo": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py b/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
index 0d47061..e6cce98 100644
--- a/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
+++ b/ambari-server/src/test/python/host_scripts/TestAlertDiskSpace.py
@@ -41,7 +41,11 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 21673930752L, used = 5695861760L,
       free = 15978068992L, path="/")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    configurations = {'{{cluster-env/stack_name}}': 'HDP',
+      '{{cluster-env/stack_root}}': '{"HDP":"/usr/hdp"}'}
+
+    res = alert_disk_space.execute(configurations=configurations)
+
     self.assertEqual(res,
       ('OK', ['Capacity Used: [26.28%, 5.7 GB], Capacity Total: [21.7 GB], path=/']))
 
@@ -50,7 +54,7 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 21673930752L, used = 14521533603L,
       free = 7152397149L, path="/")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations = configurations)
     self.assertEqual(res, (
       'WARNING',
       ['Capacity Used: [67.00%, 14.5 GB], Capacity Total: [21.7 GB], path=/']))
@@ -60,7 +64,7 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 21673930752L, used = 20590234214L,
       free = 1083696538, path="/")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations = configurations)
     self.assertEqual(res, ('CRITICAL',
     ['Capacity Used: [95.00%, 20.6 GB], Capacity Total: [21.7 GB], path=/']))
 
@@ -69,7 +73,7 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 5418482688L, used = 1625544806L,
       free = 3792937882L, path="/")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations = configurations)
     self.assertEqual(res, ('WARNING', [
       'Capacity Used: [30.00%, 1.6 GB], Capacity Total: [5.4 GB], path=/. Total free space is less than 5.0 GB']))
 
@@ -81,7 +85,7 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 21673930752L, used = 5695861760L,
       free = 15978068992L, path="/usr/hdp")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations = configurations)
     self.assertEqual(res,
       ('OK', ['Capacity Used: [26.28%, 5.7 GB], Capacity Total: [21.7 GB], path=/usr/hdp']))
 
@@ -90,6 +94,6 @@ class TestAlertDiskSpace(RMFTestCase):
       total = 5418482688L, used = 1625544806L,
       free = 3792937882L, path="/usr/hdp")
 
-    res = alert_disk_space.execute(configurations={'{{cluster-env/stack_root}}': '/usr/hdp'})
+    res = alert_disk_space.execute(configurations = configurations)
     self.assertEqual(res, (
       'WARNING', ["Capacity Used: [30.00%, 1.6 GB], Capacity Total: [5.4 GB], path=/usr/hdp. Total free space is less than 5.0 GB"]))