Posted to commits@ambari.apache.org by am...@apache.org on 2018/01/05 07:56:23 UTC

[27/45] ambari git commit: AMBARI-22718. Cannot set security.inter.broker.protocol: SASL_SSL via Blueprint with Kerberos

AMBARI-22718. Cannot set security.inter.broker.protocol: SASL_SSL via Blueprint with Kerberos


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/13056cf2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/13056cf2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/13056cf2

Branch: refs/heads/branch-feature-AMBARI-22008-isilon
Commit: 13056cf2b0bf4ff20192860e2bfee556c1c57b4a
Parents: a85ff23
Author: Doroszlai, Attila <ad...@hortonworks.com>
Authored: Wed Jan 3 09:30:33 2018 +0100
Committer: Doroszlai, Attila <ad...@hortonworks.com>
Committed: Thu Jan 4 12:53:04 2018 +0100

----------------------------------------------------------------------
 .../KAFKA/0.8.1/package/scripts/params.py       |   6 +-
 .../2.6/KAFKA/test_kafka_broker_other_sasl.py   |  16 +
 .../2.6/configs/secure_kafka_sasl_ssl.json      | 632 +++++++++++++++++++
 3 files changed, 652 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/13056cf2/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
index 46fdfba..dc42b93 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
@@ -160,8 +160,10 @@ if has_metric_collector:
 kerberos_security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 kafka_kerberos_enabled = (('security.inter.broker.protocol' in config['configurations']['kafka-broker']) and
-                         ((config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "PLAINTEXTSASL") or
-                          (config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "SASL_PLAINTEXT")))
+                         ((config['configurations']['kafka-broker']['security.inter.broker.protocol'] in ("PLAINTEXTSASL", "SASL_PLAINTEXT")) or
+                         ((config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "SASL_SSL") and
+                           check_stack_feature(StackFeature.KAFKA_EXTENDED_SASL_SUPPORT, stack_version_formatted))))
+
 
 kafka_other_sasl_enabled = not kerberos_security_enabled and check_stack_feature(StackFeature.KAFKA_LISTENERS, stack_version_formatted) and \
                           check_stack_feature(StackFeature.KAFKA_EXTENDED_SASL_SUPPORT, stack_version_formatted) and \

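A minimal sketch of the new condition, assuming a placeholder helper name and a boolean stack-feature flag (params.py itself inlines the config lookups and the check_stack_feature call rather than using a function like this):

def kafka_kerberos_enabled(kafka_broker, extended_sasl_supported):
    # kafka_broker stands for config['configurations']['kafka-broker'];
    # extended_sasl_supported stands for the result of
    # check_stack_feature(StackFeature.KAFKA_EXTENDED_SASL_SUPPORT, stack_version_formatted).
    protocol = kafka_broker.get('security.inter.broker.protocol')
    if protocol in ("PLAINTEXTSASL", "SASL_PLAINTEXT"):
        # Kerberized inter-broker traffic is recognized as before.
        return True
    if protocol == "SASL_SSL":
        # New in this commit: SASL over SSL also counts, but only on stacks
        # with extended SASL support.
        return extended_sasl_supported
    return False
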
http://git-wip-us.apache.org/repos/asf/ambari/blob/13056cf2/ambari-server/src/test/python/stacks/2.6/KAFKA/test_kafka_broker_other_sasl.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/KAFKA/test_kafka_broker_other_sasl.py b/ambari-server/src/test/python/stacks/2.6/KAFKA/test_kafka_broker_other_sasl.py
index e0bab40..a0eab8e 100644
--- a/ambari-server/src/test/python/stacks/2.6/KAFKA/test_kafka_broker_other_sasl.py
+++ b/ambari-server/src/test/python/stacks/2.6/KAFKA/test_kafka_broker_other_sasl.py
@@ -75,6 +75,22 @@ class TestKafkaBroker(RMFTestCase):
                                                owner = 'kafka')
 
 
+    def test_configure_sasl_ssl_kerberos(self):
+        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py",
+                           classname = "KafkaBroker",
+                           command = "configure",
+                           config_file="secure_kafka_sasl_ssl.json",
+                           stack_version = self.STACK_VERSION,
+                           target = RMFTestCase.TARGET_COMMON_SERVICES
+                           )
+
+        self.assertResourceCalledIgnoreEarlier('TemplateConfig', '/usr/hdp/current/kafka-broker/config/kafka_jaas.conf',
+                                               owner = 'kafka')
+
+        self.assertResourceCalledIgnoreEarlier('TemplateConfig', '/usr/hdp/current/kafka-broker/config/kafka_client_jaas.conf',
+                                               owner = 'kafka')
+
+
     def test_configure_plaintext(self):
         self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py",
                            classname = "KafkaBroker",

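The new test exercises that branch through secure_kafka_sasl_ssl.json (added below). A reduced, hypothetical slice of the inputs that matter, fed to the sketch above (the dict literal is illustrative; the key names and values match the real config file):

config = {
    'configurations': {
        'cluster-env': {'security_enabled': 'true'},
        'kafka-broker': {
            'listeners': 'SASL_PLAINTEXT://localhost:6667,SASL_SSL://localhost:6668',
            'security.inter.broker.protocol': 'SASL_SSL',
        },
    },
}

# On a stack with extended SASL support this now evaluates to True, so both
# kafka_jaas.conf and kafka_client_jaas.conf get templated -- which is what
# the two new assertResourceCalledIgnoreEarlier calls verify.
assert kafka_kerberos_enabled(config['configurations']['kafka-broker'], True)
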
http://git-wip-us.apache.org/repos/asf/ambari/blob/13056cf2/ambari-server/src/test/python/stacks/2.6/configs/secure_kafka_sasl_ssl.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/secure_kafka_sasl_ssl.json b/ambari-server/src/test/python/stacks/2.6/configs/secure_kafka_sasl_ssl.json
new file mode 100644
index 0000000..cb5b257
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.6/configs/secure_kafka_sasl_ssl.json
@@ -0,0 +1,632 @@
+{
+  "roleCommand": "SERVICE_CHECK",
+  "clusterName": "c1",
+  "hostname": "c6401.ambari.apache.org",
+  "hostLevelParams": {
+    "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+    "agent_stack_retry_count": "5",
+    "agent_stack_retry_on_unavailability": "false",
+    "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+    "ambari_db_rca_password": "mapred",
+    "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+    "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+    "stack_version": "2.6.5.0",
+    "stack_name": "HDP",
+    "ambari_db_rca_driver": "org.postgresql.Driver",
+    "jdk_name": "jdk-7u67-linux-x64.tar.gz",
+    "ambari_db_rca_username": "mapred",
+    "java_home": "/usr/jdk64/jdk1.7.0_45",
+    "db_name": "ambari",
+    "custom_mysql_jdbc_name": "mysql-connector-java.jar"
+  },
+  "commandType": "EXECUTION_COMMAND",
+  "roleParams": {},
+  "serviceName": "SLIDER",
+  "role": "SLIDER",
+  "commandParams": {
+    "version": "2.6.5.0-1235",
+    "command_timeout": "300",
+    "service_package_folder": "OOZIE",
+    "script_type": "PYTHON",
+    "script": "scripts/service_check.py",
+    "excluded_hosts": "host1,host2"
+  },
+  "taskId": 152,
+  "public_hostname": "c6401.ambari.apache.org",
+  "configurations": {
+    "slider-client": {
+      "slider.yarn.queue": "default"
+    },
+    "sqoop-site": {
+      "atlas.cluster.name": "c1",
+      "sqoop.job.data.publish.class": "org.apache.atlas.sqoop.hook.SqoopHook"
+    },
+    "mahout-env": {
+      "mahout_user": "mahout"
+    },
+    "hbase-env": {
+      "hbase_user": "hbase"
+    },
+    "yarn-env": {
+      "yarn_user": "yarn"
+    },
+    "mahout-log4j": {
+      "content": "\n            #\n            #\n            # Licensed to the Apache Software Foundation (ASF) under one\n            # or more contributor license agreements.  See the NOTICE file\n            # distributed with this work for additional information\n            # regarding copyright ownership.  The ASF licenses this file\n            # to you under the Apache License, Version 2.0 (the\n            # \"License\"); you may not use this file except in compliance\n            # with the License.  You may obtain a copy of the License at\n            #\n            #   http://www.apache.org/licenses/LICENSE-2.0\n            #\n            # Unless required by applicable law or agreed to in writing,\n            # software distributed under the License is distributed on an\n            # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n            # KIND, either express or implied.  See the License for the\n            # specific language governing permissions a
 nd limitations\n            # under the License.\n            #\n            #\n            #\n\n            # Set everything to be logged to the console\n            log4j.rootCategory=WARN, console\n            log4j.appender.console=org.apache.log4j.ConsoleAppender\n            log4j.appender.console.target=System.err\n            log4j.appender.console.layout=org.apache.log4j.PatternLayout\n            log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n            # Settings to quiet third party logs that are too verbose\n            log4j.logger.org.eclipse.jetty=WARN\n            log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=WARN\n            log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=WARN"
+    },
+    "hadoop-env": {
+      "hdfs_user": "hdfs",
+      "hdfs_tmp_dir": "/tmp"
+    },
+    "core-site": {
+      "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
+    },
+    "hdfs-site": {
+      "a": "b"
+    },
+    "yarn-site": {
+      "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
+      "yarn.resourcemanager.address": "c6401.ambari.apache.org:8050",
+      "yarn.resourcemanager.scheduler.address": "c6401.ambari.apache.org:8030"
+    },
+    "cluster-env": {
+      "managed_hdfs_resource_property_names": "",
+      "security_enabled": "true",
+      "ignore_groupsusers_create": "false",
+      "smokeuser": "ambari-qa",
+      "kerberos_domain": "EXAMPLE.COM",
+      "user_group": "hadoop"
+    },
+    "webhcat-site": {
+      "templeton.jar": "/usr/hdp/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar",
+      "templeton.pig.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/pig/pig.tar.gz",
+      "templeton.hive.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/hive/hive.tar.gz",
+      "templeton.sqoop.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/sqoop/sqoop.tar.gz",
+      "templeton.streaming.jar": "hdfs:///hdp/apps/{{ hdp_stack_version }}/mr/hadoop-streaming.jar"
+    },
+    "slider-log4j": {
+      "content": "log4jproperties\nline2"
+    },
+    "slider-env": {
+      "content": "envproperties\nline2"
+    },
+    "gateway-site": {
+      "java.security.auth.login.config": "/etc/knox/conf/krb5JAASLogin.conf",
+      "gateway.hadoop.kerberos.secured": "false",
+      "gateway.gateway.conf.dir": "deployments",
+      "gateway.path": "gateway",
+      "sun.security.krb5.debug": "true",
+      "java.security.krb5.conf": "/etc/knox/conf/krb5.conf",
+      "gateway.port": "8443"
+    },
+
+    "users-ldif": {
+      "content": "\n            # Licensed to the Apache Software Foundation (ASF) under one\n            # or more contributor license agreements.  See the NOTICE file\n            # distributed with this work for additional information\n            # regarding copyright ownership.  The ASF licenses this file\n            # to you under the Apache License, Version 2.0 (the\n            # \"License\"); you may not use this file except in compliance\n            # with the License.  You may obtain a copy of the License at\n            #\n            #     http://www.apache.org/licenses/LICENSE-2.0\n            #\n            # Unless required by applicable law or agreed to in writing, software\n            # distributed under the License is distributed on an \"AS IS\" BASIS,\n            # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n            # See the License for the specific language governing permissions and\n            # limitations under the Lice
 nse.\n\n            version: 1\n\n            # Please replace with site specific values\n            dn: dc=hadoop,dc=apache,dc=org\n            objectclass: organization\n            objectclass: dcObject\n            o: Hadoop\n            dc: hadoop\n\n            # Entry for a sample people container\n            # Please replace with site specific values\n            dn: ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:organizationalUnit\n            ou: people\n\n            # Entry for a sample end user\n            # Please replace with site specific values\n            dn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: Guest\n            sn: User\n            uid: guest\n            userPassword:guest-password\n\n            # entry for sample user admin\n            dn: 
 uid=admin,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: Admin\n            sn: Admin\n            uid: admin\n            userPassword:admin-password\n\n            # entry for sample user sam\n            dn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: sam\n            sn: sam\n            uid: sam\n            userPassword:sam-password\n\n            # entry for sample user tom\n            dn: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: tom\n            sn: tom\n            uid: tom\n            userPasswor
 d:tom-password\n\n            # create FIRST Level groups branch\n            dn: ou=groups,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:organizationalUnit\n            ou: groups\n            description: generic groups branch\n\n            # create the analyst group under groups\n            dn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass: groupofnames\n            cn: analyst\n            description:analyst  group\n            member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n            member: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n\n\n            # create the scientist group under groups\n            dn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass: groupofnames\n            cn: scientist\n            description: scientist group\n            member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org"
+    },
+
+    "topology": {
+      "content": "\n        <topology>\n\n            <gateway>\n\n                <provider>\n                    <role>authentication</role>\n                    <name>ShiroProvider</name>\n                    <enabled>true</enabled>\n                    <param>\n                        <name>sessionTimeout</name>\n                        <value>30</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm</name>\n                        <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm.userDnTemplate</name>\n                        <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm.contextFactory.url</name>\n                        <value>ldap://{{knox_host_name}}:33389</value>\n                 
    </param>\n                    <param>\n                        <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n                        <value>simple</value>\n                    </param>\n                    <param>\n                        <name>urls./**</name>\n                        <value>authcBasic</value>\n                    </param>\n                </provider>\n\n                <provider>\n                    <role>identity-assertion</role>\n                    <name>Default</name>\n                    <enabled>true</enabled>\n                </provider>\n\n            </gateway>\n\n            <service>\n                <role>NAMENODE</role>\n                <url>hdfs://{{namenode_host}}:{{namenode_rpc_port}}</url>\n            </service>\n\n            <service>\n                <role>JOBTRACKER</role>\n                <url>rpc://{{rm_host}}:{{jt_rpc_port}}</url>\n            </service>\n\n            <service>\n                <role>WEBHDFS</role
 >\n                <url>http://{{namenode_host}}:{{namenode_http_port}}/webhdfs</url>\n            </service>\n\n            <service>\n                <role>WEBHCAT</role>\n                <url>http://{{webhcat_server_host}}:{{templeton_port}}/templeton</url>\n            </service>\n\n            <service>\n                <role>OOZIE</role>\n                <url>http://{{oozie_server_host}}:{{oozie_server_port}}/oozie</url>\n            </service>\n\n            <service>\n                <role>WEBHBASE</role>\n                <url>http://{{hbase_master_host}}:{{hbase_master_port}}</url>\n            </service>\n\n            <service>\n                <role>HIVE</role>\n                <url>http://{{hive_server_host}}:{{hive_http_port}}/{{hive_http_path}}</url>\n            </service>\n\n            <service>\n                <role>RESOURCEMANAGER</role>\n                <url>http://{{rm_host}}:{{rm_port}}/ws</url>\n            </service>\n        </topology>"
+    },
+
+    "ldap-log4j": {
+      "content": "\n        # Licensed to the Apache Software Foundation (ASF) under one\n        # or more contributor license agreements.  See the NOTICE file\n        # distributed with this work for additional information\n        # regarding copyright ownership.  The ASF licenses this file\n        # to you under the Apache License, Version 2.0 (the\n        # \"License\"); you may not use this file except in compliance\n        # with the License.  You may obtain a copy of the License at\n        #\n        #     http://www.apache.org/licenses/LICENSE-2.0\n        #\n        # Unless required by applicable law or agreed to in writing, software\n        # distributed under the License is distributed on an \"AS IS\" BASIS,\n        # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n        # See the License for the specific language governing permissions and\n        # limitations under the License.\n        #testing\n\n        app.log.dir=${launcher.dir
 }/../logs\n        app.log.file=${launcher.name}.log\n\n        log4j.rootLogger=ERROR, drfa\n        log4j.logger.org.apache.directory.server.ldap.LdapServer=INFO\n        log4j.logger.org.apache.directory=WARN\n\n        log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n        log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n        log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n        log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n        log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n        log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n        log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n        log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n"
+    },
+
+    "gateway-log4j": {
+      "content": "\n\n      # Licensed to the Apache Software Foundation (ASF) under one\n      # or more contributor license agreements. See the NOTICE file\n      # distributed with this work for additional information\n      # regarding copyright ownership. The ASF licenses this file\n      # to you under the Apache License, Version 2.0 (the\n      # \"License\"); you may not use this file except in compliance\n      # with the License. You may obtain a copy of the License at\n      #\n      # http://www.apache.org/licenses/LICENSE-2.0\n      #\n      # Unless required by applicable law or agreed to in writing, software\n      # distributed under the License is distributed on an \"AS IS\" BASIS,\n      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n      # See the License for the specific language governing permissions and\n      # limitations under the License.\n\n      app.log.dir=${launcher.dir}/../logs\n      app.log.file=${launcher.name}.log\n   
    app.audit.file=${launcher.name}-audit.log\n\n      log4j.rootLogger=ERROR, drfa\n\n      log4j.logger.org.apache.hadoop.gateway=INFO\n      #log4j.logger.org.apache.hadoop.gateway=DEBUG\n\n      #log4j.logger.org.eclipse.jetty=DEBUG\n      #log4j.logger.org.apache.shiro=DEBUG\n      #log4j.logger.org.apache.http=DEBUG\n      #log4j.logger.org.apache.http.client=DEBUG\n      #log4j.logger.org.apache.http.headers=DEBUG\n      #log4j.logger.org.apache.http.wire=DEBUG\n\n      log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n      log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n      log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n      log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n      log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n      log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n      log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n      log4j.appender.drfa.layout.ConversionPattern=%
 d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n      log4j.logger.audit=INFO, auditfile\n      log4j.appender.auditfile=org.apache.log4j.DailyRollingFileAppender\n      log4j.appender.auditfile.File=${app.log.dir}/${app.audit.file}\n      log4j.appender.auditfile.Append = true\n      log4j.appender.auditfile.DatePattern = '.'yyyy-MM-dd\n      log4j.appender.auditfile.layout = org.apache.hadoop.gateway.audit.log4j.layout.AuditLayout"
+    },
+    "knox-env": {
+      "knox_master_secret": "sa",
+      "knox_group": "knox",
+      "knox_pid_dir": "/var/run/knox",
+      "knox_user": "knox"
+    },
+    "kafka-env": {
+      "kafka_user_nproc_limit": "65536",
+      "content": "\n#!/bin/bash\n\n# Set KAFKA specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\nexport PATH=$PATH:$JAVA_HOME/bin\nexport PID_DIR={{kafka_pid_dir}}\nexport LOG_DIR={{kafka_log_dir}}\n{% if kerberos_security_enabled or kafka_other_sasl_enabled %}\nexport KAFKA_KERBEROS_PARAMS=\"-Djavax.security.auth.useSubjectCredsOnly=false {{kafka_kerberos_params}}\"\n{% else %}\nexport KAFKA_KERBEROS_PARAMS={{kafka_kerberos_params}}\n{% endif %}\n# Add kafka sink to classpath and related depenencies\nif [ -e \"/usr/lib/ambari-metrics-kafka-sink/ambari-metrics-kafka-sink.jar\" ]; then\n  export CLASSPATH=$CLASSPATH:/usr/lib/ambari-metrics-kafka-sink/ambari-metrics-kafka-sink.jar\n  export CLASSPATH=$CLASSPATH:/usr/lib/ambari-metrics-kafka-sink/lib/*\nfi\nif [ -f /etc/kafka/conf/kafka-ranger-env.sh ]; then\n. /etc/kafka/conf/kafka-ranger-env.sh\nfi",
+      "kafka_log_dir": "/var/log/kafka",
+      "kafka_pid_dir": "/var/run/kafka",
+      "kafka_user_nofile_limit": "128000",
+      "is_supported_kafka_ranger": "true",
+      "kafka_user": "kafka"
+    },
+    "kafka-log4j": {
+      "kafka_log_maxbackupindex": "20",
+      "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\nkafka.logs.dir=logs\n\nlog4j.rootLogger=INFO, stdout\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org.apache.log4j
 .PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log\nlog4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\nlog4j.appender.kafkaAppender.MaxFileSize = {{kafka_log_maxfilesize}}MB\nlog4j.appender.kafkaAppender.MaxBackupIndex = {{kafka_log_maxbackupindex}}\n\nlog4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log\nlog4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.requestAppender=org.apache.log4j.Daily
 RollingFileAppender\nlog4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log\nlog4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log\nlog4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log\nlog4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.controllerAppender.layout.ConversionPatt
 ern=[%d] %p %m (%c)%n\nlog4j.appender.controllerAppender.MaxFileSize = {{controller_log_maxfilesize}}MB\nlog4j.appender.controllerAppender.MaxBackupIndex = {{controller_log_maxbackupindex}}\n# Turn on all our debugging info\n#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender\n#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender\n#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG\nlog4j.logger.kafka=INFO, kafkaAppender\nlog4j.logger.kafka.network.RequestChannel$=WARN, requestAppender\nlog4j.additivity.kafka.network.RequestChannel$=false\n\n#log4j.logger.kafka.network.Processor=TRACE, requestAppender\n#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender\n#log4j.additivity.kafka.server.KafkaApis=false\nlog4j.logger.kafka.request.logger=WARN, requestAppender\nlog4j.additivity.kafka.request.logger=false\n\nlog4j.logger.kafka
 .controller=TRACE, controllerAppender\nlog4j.additivity.kafka.controller=false\n\nlog4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender\nlog4j.additivity.kafka.log.LogCleaner=false\n\nlog4j.logger.state.change.logger=TRACE, stateChangeAppender\nlog4j.additivity.state.change.logger=false",
+      "kafka_log_maxfilesize": "256",
+      "controller_log_maxfilesize": "256",
+      "controller_log_maxbackupindex": "20"
+    },
+    "kafka-broker": {
+      "auto.leader.rebalance.enable": "true",
+      "kafka.ganglia.metrics.port": "8671",
+      "socket.send.buffer.bytes": "102400",
+      "num.network.threads": "3",
+      "log.segment.bytes": "1073741824",
+      "kafka.ganglia.metrics.host": "localhost",
+      "kafka.timeline.metrics.maxRowCacheSize": "10000",
+      "replica.lag.time.max.ms": "10000",
+      "num.io.threads": "8",
+      "offsets.retention.minutes": "86400000",
+      "fetch.purgatory.purge.interval.requests": "10000",
+      "offsets.topic.compression.codec": "0",
+      "default.replication.factor": "1",
+      "port": "6667",
+      "num.recovery.threads.per.data.dir": "1",
+      "kafka.metrics.reporters": "org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter",
+      "log.retention.bytes": "-1",
+      "producer.purgatory.purge.interval.requests": "10000",
+      "replica.lag.max.messages": "4000",
+      "replica.high.watermark.checkpoint.interval.ms": "5000",
+      "zookeeper.connect": "ydavis-kafka-ambari-upgrade-1.openstacklocal:2181",
+      "controlled.shutdown.retry.backoff.ms": "5000",
+      "sasl.enabled.mechanisms": "GSSAPI",
+      "kafka.timeline.metrics.reporter.sendInterval": "5900",
+      "num.partitions": "1",
+      "offsets.topic.segment.bytes": "104857600",
+      "replica.fetch.min.bytes": "1",
+      "zookeeper.sync.time.ms": "2000",
+      "offset.metadata.max.bytes": "4096",
+      "kafka.timeline.metrics.reporter.enabled": "true",
+      "kafka.timeline.metrics.truststore.type": "{{metric_truststore_type}}",
+      "controlled.shutdown.max.retries": "3",
+      "leader.imbalance.per.broker.percentage": "10",
+      "min.insync.replicas": "1",
+      "offsets.commit.required.acks": "-1",
+      "replica.fetch.wait.max.ms": "500",
+      "controlled.shutdown.enable": "true",
+      "log.roll.hours": "168",
+      "log.cleanup.interval.mins": "10",
+      "replica.socket.receive.buffer.bytes": "65536",
+      "kafka.ganglia.metrics.reporter.enabled": "true",
+      "kafka.timeline.metrics.truststore.path": "{{metric_truststore_path}}",
+      "zookeeper.connection.timeout.ms": "25000",
+      "delete.topic.enable": "false",
+      "offsets.load.buffer.size": "5242880",
+      "num.replica.fetchers": "1",
+      "socket.request.max.bytes": "104857600",
+      "message.max.bytes": "1000000",
+      "controller.message.queue.size": "10",
+      "kafka.ganglia.metrics.group": "kafka",
+      "compression.type": "producer",
+      "queued.max.requests": "500",
+      "log.index.interval.bytes": "4096",
+      "replica.fetch.max.bytes": "1048576",
+      "offsets.topic.num.partitions": "50",
+      "socket.receive.buffer.bytes": "102400",
+      "kafka.timeline.metrics.port": "{{metric_collector_port}}",
+      "offsets.commit.timeout.ms": "5000",
+      "offsets.topic.replication.factor": "3",
+      "external.kafka.metrics.include.prefix": "kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.98percentile,kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.95percentile,kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.95percentile,kafka.network.RequestMetrics.RequestsPerSec.request",
+      "offsets.retention.check.interval.ms": "600000",
+      "log.index.size.max.bytes": "10485760",
+      "log.dirs": "/kafka-logs",
+      "listeners": "SASL_PLAINTEXT://localhost:6667,SASL_SSL://localhost:6668",
+      "security.inter.broker.protocol": "SASL_SSL",
+      "kafka.timeline.metrics.hosts": "{{ams_collector_hosts}}",
+      "controller.socket.timeout.ms": "30000",
+      "replica.socket.timeout.ms": "30000",
+      "zookeeper.session.timeout.ms": "30000",
+      "auto.create.topics.enable": "true",
+      "kafka.timeline.metrics.truststore.password": "{{metric_truststore_password}}",
+      "external.kafka.metrics.exclude.prefix": "kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec,kafka.server.KafkaServer.ClusterId",
+      "leader.imbalance.check.interval.seconds": "300",
+      "sasl.mechanism.inter.broker.protocol": "GSSAPI",
+      "log.retention.hours": "168",
+      "kafka.timeline.metrics.protocol": "{{metric_collector_protocol}}"
+    },
+    "spark-defaults": {
+      "spark.yarn.applicationMaster.waitTries": "10",
+      "spark.history.kerberos.keytab": "none",
+      "spark.yarn.preserve.staging.files": "false",
+      "spark.yarn.submit.file.replication": "3",
+      "spark.history.kerberos.principal": "none",
+      "spark.yarn.driver.memoryOverhead": "384",
+      "spark.yarn.queue": "default",
+      "spark.yarn.containerLauncherMaxThreads": "25",
+      "spark.yarn.scheduler.heartbeat.interval-ms": "5000",
+      "spark.history.ui.port": "18080",
+      "spark.yarn.max.executor.failures": "3",
+      "spark.driver.extraJavaOptions": "",
+      "spark.history.provider": "org.apache.spark.deploy.yarn.history.YarnHistoryProvider",
+      "spark.yarn.am.extraJavaOptions": "",
+      "spark.yarn.executor.memoryOverhead": "384"
+    },
+    "spark-javaopts-properties": {
+      "content": " "
+    },
+    "spark-log4j-properties": {
+      "content": "\n# Set everything to be logged to the console\nlog4j.rootCategory=INFO, console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO"
+    },
+    "spark-env": {
+      "content": "\n#!/usr/bin/env bash\n\n# This file is sourced when running various Spark programs.\n# Copy it as spark-env.sh and edit that to configure Spark for your site.\n\n# Options read in YARN client mode\n#SPARK_EXECUTOR_INSTANCES=\"2\" #Number of workers to start (Default: 2)\n#SPARK_EXECUTOR_CORES=\"1\" #Number of cores for the workers (Default: 1).\n#SPARK_EXECUTOR_MEMORY=\"1G\" #Memory per Worker (e.g. 1000M, 2G) (Default: 1G)\n#SPARK_DRIVER_MEMORY=\"512 Mb\" #Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)\n#SPARK_YARN_APP_NAME=\"spark\" #The name of your application (Default: Spark)\n#SPARK_YARN_QUEUE=\"~@~Xdefault~@~Y\" #The hadoop queue to use for allocation requests (Default: @~Xdefault~@~Y)\n#SPARK_YARN_DIST_FILES=\"\" #Comma separated list of files to be distributed with the job.\n#SPARK_YARN_DIST_ARCHIVES=\"\" #Comma separated list of archives to be distributed with the job.\n\n# Generic options for the daemons used in the standalone deploy mode\n\n# Alt
 ernate conf dir. (Default: ${SPARK_HOME}/conf)\nexport SPARK_CONF_DIR=${SPARK_HOME:-{{spark_home}}}/conf\n\n# Where log files are stored.(Default:${SPARK_HOME}/logs)\n#export SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs\nexport SPARK_LOG_DIR={{spark_log_dir}}\n\n# Where the pid file is stored. (Default: /tmp)\nexport SPARK_PID_DIR={{spark_pid_dir}}\n\n# A string representing this instance of spark.(Default: $USER)\nSPARK_IDENT_STRING=$USER\n\n# The scheduling priority for daemons. (Default: 0)\nSPARK_NICENESS=0\n\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}\n\nif [ -d \"/etc/tez/conf/\" ]; then\n  export TEZ_CONF_DIR=/etc/tez/conf\nelse\n  export TEZ_CONF_DIR=\nfi",
+      "spark_pid_dir": "/var/run/spark",
+      "spark_log_dir": "/var/log/spark",
+      "spark_group": "spark",
+      "spark_user": "spark"
+    },
+    "spark2-env": {
+      "content": "\n#!/usr/bin/env bash\n\n# This file is sourced when running various Spark programs.\n# Copy it as spark-env.sh and edit that to configure Spark for your site.\n\n# Options read in YARN client mode\n#SPARK_EXECUTOR_INSTANCES=\"2\" #Number of workers to start (Default: 2)\n#SPARK_EXECUTOR_CORES=\"1\" #Number of cores for the workers (Default: 1).\n#SPARK_EXECUTOR_MEMORY=\"1G\" #Memory per Worker (e.g. 1000M, 2G) (Default: 1G)\n#SPARK_DRIVER_MEMORY=\"512 Mb\" #Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)\n#SPARK_YARN_APP_NAME=\"spark\" #The name of your application (Default: Spark)\n#SPARK_YARN_QUEUE=\"~@~Xdefault~@~Y\" #The hadoop queue to use for allocation requests (Default: @~Xdefault~@~Y)\n#SPARK_YARN_DIST_FILES=\"\" #Comma separated list of files to be distributed with the job.\n#SPARK_YARN_DIST_ARCHIVES=\"\" #Comma separated list of archives to be distributed with the job.\n\n# Generic options for the daemons used in the standalone deploy mode\n\n# Alt
 ernate conf dir. (Default: ${SPARK_HOME}/conf)\nexport SPARK_CONF_DIR=${SPARK_HOME:-{{spark_home}}}/conf\n\n# Where log files are stored.(Default:${SPARK_HOME}/logs)\n#export SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs\nexport SPARK_LOG_DIR={{spark_log_dir}}\n\n# Where the pid file is stored. (Default: /tmp)\nexport SPARK_PID_DIR={{spark_pid_dir}}\n\n# A string representing this instance of spark.(Default: $USER)\nSPARK_IDENT_STRING=$USER\n\n# The scheduling priority for daemons. (Default: 0)\nSPARK_NICENESS=0\n\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}\n\nif [ -d \"/etc/tez/conf/\" ]; then\n  export TEZ_CONF_DIR=/etc/tez/conf\nelse\n  export TEZ_CONF_DIR=\nfi",
+      "spark_pid_dir": "/var/run/spark",
+      "spark_log_dir": "/var/log/spark",
+      "spark_group": "spark",
+      "spark_user": "spark"
+    },
+    "spark-metrics-properties": {
+      "content": "\n# syntax: [instance].sink|source.[name].[options]=[value]\n\n# This file configures Spark's internal metrics system. The metrics system is\n# divided into instances which correspond to internal components.\n# Each instance can be configured to report its metrics to one or more sinks.\n# Accepted values for [instance] are \"master\", \"worker\", \"executor\", \"driver\",\n# and \"applications\". A wild card \"*\" can be used as an instance name, in\n# which case all instances will inherit the supplied property.\n#\n# Within an instance, a \"source\" specifies a particular set of grouped metrics.\n# there are two kinds of sources:\n# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will\n# collect a Spark component's internal state. Each instance is paired with a\n# Spark source that is added automatically.\n# 2. Common sources, like JvmSource, which will collect low level state.\n# These can be added through configuration options and are then
  loaded\n# using reflection.\n#\n# A \"sink\" specifies where metrics are delivered to. Each instance can be\n# assigned one or more sinks.\n#\n# The sink|source field specifies whether the property relates to a sink or\n# source.\n#\n# The [name] field specifies the name of source or sink.\n#\n# The [options] field is the specific property of this source or sink. The\n# source or sink is responsible for parsing this property.\n#\n# Notes:\n# 1. To add a new sink, set the \"class\" option to a fully qualified class\n# name (see examples below).\n# 2. Some sinks involve a polling period. The minimum allowed polling period\n# is 1 second.\n# 3. Wild card properties can be overridden by more specific properties.\n# For example, master.sink.console.period takes precedence over\n# *.sink.console.period.\n# 4. A metrics specific configuration\n# \"spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties\" should be\n# added to Java properties using -Dspark.metrics.conf=xxx if you want to\
 n# customize metrics system. You can also put the file in ${SPARK_HOME}/conf\n# and it will be loaded automatically.\n# 5. MetricsServlet is added by default as a sink in master, worker and client\n# driver, you can send http request \"/metrics/json\" to get a snapshot of all the\n# registered metrics in json format. For master, requests \"/metrics/master/json\" and\n# \"/metrics/applications/json\" can be sent seperately to get metrics snapshot of\n# instance master and applications. MetricsServlet may not be configured by self.\n#\n\n## List of available sinks and their properties.\n\n# org.apache.spark.metrics.sink.ConsoleSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n\n# org.apache.spark.metrics.sink.CSVSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n# directory /tmp Where to store CSV files\n\n# org.apache.spark.metrics.sink.GangliaSink\n# Name: Default: Description:\n# host N
 ONE Hostname or multicast group of Ganglia server\n# port NONE Port of Ganglia server(s)\n# period 10 Poll period\n# unit seconds Units of poll period\n# ttl 1 TTL of messages sent by Ganglia\n# mode multicast Ganglia network mode ('unicast' or 'multicast')\n\n# org.apache.spark.metrics.sink.JmxSink\n\n# org.apache.spark.metrics.sink.MetricsServlet\n# Name: Default: Description:\n# path VARIES* Path prefix from the web server root\n# sample false Whether to show entire set of samples for histograms ('false' or 'true')\n#\n# * Default path is /metrics/json for all instances except the master. The master has two paths:\n# /metrics/aplications/json # App information\n# /metrics/master/json # Master information\n\n# org.apache.spark.metrics.sink.GraphiteSink\n# Name: Default: Description:\n# host NONE Hostname of Graphite server\n# port NONE Port of Graphite server\n# period 10 Poll period\n# unit seconds Units of poll period\n# prefix EMPTY STRING Prefix to prepend to metric name\n\n##
  Examples\n# Enable JmxSink for all instances by class name\n#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink\n\n# Enable ConsoleSink for all instances by class name\n#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink\n\n# Polling period for ConsoleSink\n#*.sink.console.period=10\n\n#*.sink.console.unit=seconds\n\n# Master instance overlap polling period\n#master.sink.console.period=15\n\n#master.sink.console.unit=seconds\n\n# Enable CsvSink for all instances\n#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink\n\n# Polling period for CsvSink\n#*.sink.csv.period=1\n\n#*.sink.csv.unit=minutes\n\n# Polling directory for CsvSink\n#*.sink.csv.directory=/tmp/\n\n# Worker instance overlap polling period\n#worker.sink.csv.period=10\n\n#worker.sink.csv.unit=minutes\n\n# Enable jvm source for instance master, worker, driver and executor\n#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#worker.source.jvm.class=org.apache.spark.metrics.source.JvmSo
 urce\n\n#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource"
+    },
+    "spark-metrics-properties": {
+      "content": "\n# syntax: [instance].sink|source.[name].[options]=[value]\n\n# This file configures Spark's internal metrics system. The metrics system is\n# divided into instances which correspond to internal components.\n# Each instance can be configured to report its metrics to one or more sinks.\n# Accepted values for [instance] are \"master\", \"worker\", \"executor\", \"driver\",\n# and \"applications\". A wild card \"*\" can be used as an instance name, in\n# which case all instances will inherit the supplied property.\n#\n# Within an instance, a \"source\" specifies a particular set of grouped metrics.\n# there are two kinds of sources:\n# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will\n# collect a Spark component's internal state. Each instance is paired with a\n# Spark source that is added automatically.\n# 2. Common sources, like JvmSource, which will collect low level state.\n# These can be added through configuration options and are then
  loaded\n# using reflection.\n#\n# A \"sink\" specifies where metrics are delivered to. Each instance can be\n# assigned one or more sinks.\n#\n# The sink|source field specifies whether the property relates to a sink or\n# source.\n#\n# The [name] field specifies the name of source or sink.\n#\n# The [options] field is the specific property of this source or sink. The\n# source or sink is responsible for parsing this property.\n#\n# Notes:\n# 1. To add a new sink, set the \"class\" option to a fully qualified class\n# name (see examples below).\n# 2. Some sinks involve a polling period. The minimum allowed polling period\n# is 1 second.\n# 3. Wild card properties can be overridden by more specific properties.\n# For example, master.sink.console.period takes precedence over\n# *.sink.console.period.\n# 4. A metrics specific configuration\n# \"spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties\" should be\n# added to Java properties using -Dspark.metrics.conf=xxx if you want to\
 n# customize metrics system. You can also put the file in ${SPARK_HOME}/conf\n# and it will be loaded automatically.\n# 5. MetricsServlet is added by default as a sink in master, worker and client\n# driver, you can send http request \"/metrics/json\" to get a snapshot of all the\n# registered metrics in json format. For master, requests \"/metrics/master/json\" and\n# \"/metrics/applications/json\" can be sent seperately to get metrics snapshot of\n# instance master and applications. MetricsServlet may not be configured by self.\n#\n\n## List of available sinks and their properties.\n\n# org.apache.spark.metrics.sink.ConsoleSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n\n# org.apache.spark.metrics.sink.CSVSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n# directory /tmp Where to store CSV files\n\n# org.apache.spark.metrics.sink.GangliaSink\n# Name: Default: Description:\n# host N
 ONE Hostname or multicast group of Ganglia server\n# port NONE Port of Ganglia server(s)\n# period 10 Poll period\n# unit seconds Units of poll period\n# ttl 1 TTL of messages sent by Ganglia\n# mode multicast Ganglia network mode ('unicast' or 'multicast')\n\n# org.apache.spark.metrics.sink.JmxSink\n\n# org.apache.spark.metrics.sink.MetricsServlet\n# Name: Default: Description:\n# path VARIES* Path prefix from the web server root\n# sample false Whether to show entire set of samples for histograms ('false' or 'true')\n#\n# * Default path is /metrics/json for all instances except the master. The master has two paths:\n# /metrics/aplications/json # App information\n# /metrics/master/json # Master information\n\n# org.apache.spark.metrics.sink.GraphiteSink\n# Name: Default: Description:\n# host NONE Hostname of Graphite server\n# port NONE Port of Graphite server\n# period 10 Poll period\n# unit seconds Units of poll period\n# prefix EMPTY STRING Prefix to prepend to metric name\n\n##
  Examples\n# Enable JmxSink for all instances by class name\n#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink\n\n# Enable ConsoleSink for all instances by class name\n#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink\n\n# Polling period for ConsoleSink\n#*.sink.console.period=10\n\n#*.sink.console.unit=seconds\n\n# Master instance overlap polling period\n#master.sink.console.period=15\n\n#master.sink.console.unit=seconds\n\n# Enable CsvSink for all instances\n#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink\n\n# Polling period for CsvSink\n#*.sink.csv.period=1\n\n#*.sink.csv.unit=minutes\n\n# Polling directory for CsvSink\n#*.sink.csv.directory=/tmp/\n\n# Worker instance overlap polling period\n#worker.sink.csv.period=10\n\n#worker.sink.csv.unit=minutes\n\n# Enable jvm source for instance master, worker, driver and executor\n#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#worker.source.jvm.class=org.apache.spark.metrics.source.JvmSo
 urce\n\n#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource"
+    },
+    "livy-log4j-properties": {
+      "content": "\n            # Set everything to be logged to the console\n            log4j.rootCategory=INFO, console\n            log4j.appender.console=org.apache.log4j.ConsoleAppender\n            log4j.appender.console.target=System.err\n            log4j.appender.console.layout=org.apache.log4j.PatternLayout\n            log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n            log4j.logger.org.eclipse.jetty=WARN"
+    },
+    "livy2-log4j-properties": {
+      "content": "\n            # Set everything to be logged to the console\n            log4j.rootCategory=INFO, console\n            log4j.appender.console=org.apache.log4j.ConsoleAppender\n            log4j.appender.console.target=System.err\n            log4j.appender.console.layout=org.apache.log4j.PatternLayout\n            log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n            log4j.logger.org.eclipse.jetty=WARN"
+    },
+    "livy-conf": {
+      "livy.server.port": "8998",
+      "livy.server.csrf_protection.enabled": "true",
+      "livy.environment": "production",
+      "livy.impersonation.enabled": "true",
+      "livy.server.session.timeout": "3600000"
+    },
+    "livy2-conf": {
+      "livy.server.port": "8999",
+      "livy.server.csrf_protection.enabled": "true",
+      "livy.environment": "production",
+      "livy.impersonation.enabled": "true",
+      "livy.server.session.timeout": "3600000"
+    },
+    "livy-spark-blacklist": {
+      "content": "\n            #\n            # Configuration override / blacklist. Defines a list of properties that users are not allowed\n            # to override when starting Spark sessions.\n            #\n            # This file takes a list of property names (one per line). Empty lines and lines starting with \"#\"\n            # are ignored.\n            #"
+    },
+    "livy2-spark-blacklist": {
+      "content": "\n            #\n            # Configuration override / blacklist. Defines a list of properties that users are not allowed\n            # to override when starting Spark sessions.\n            #\n            # This file takes a list of property names (one per line). Empty lines and lines starting with \"#\"\n            # are ignored.\n            #"
+    },
+    "livy-env": {
+      "livy_group": "livy",
+      "spark_home": "/usr/hdp/current/spark-client",
+      "content": "\n            #!/usr/bin/env bash\n\n            # - SPARK_HOME      Spark which you would like to use in livy\n            # - HADOOP_CONF_DIR Directory containing the Hadoop / YARN configuration to use.\n            # - LIVY_LOG_DIR    Where log files are stored.  (Default: ${LIVY_HOME}/logs)\n            # - LIVY_PID_DIR    Where the pid file is stored. (Default: /tmp)\n            # - LIVY_SERVER_JAVA_OPTS  Java Opts for running livy server (You can set jvm related setting here, like jvm memory/gc algorithm and etc.)\n        export SPARK_HOME=/usr/hdp/current/spark-client\n        export HADOOP_CONF_DIR=/etc/hadoop/conf\n        export LIVY_LOG_DIR={{livy_log_dir}}\n        export LIVY_PID_DIR={{livy_pid_dir}}\n        export LIVY_SERVER_JAVA_OPTS=\"-Xmx2g\"",
+      "livy_pid_dir": "/var/run/livy",
+      "livy_log_dir": "/var/log/livy",
+      "livy_user": "livy"
+    },
+    "livy2-env": {
+      "livy2_group": "livy",
+      "spark_home": "/usr/hdp/current/spark2-client",
+      "content": "\n            #!/usr/bin/env bash\n\n            # - SPARK_HOME      Spark which you would like to use in livy\n            # - HADOOP_CONF_DIR Directory containing the Hadoop / YARN configuration to use.\n            # - LIVY_LOG_DIR    Where log files are stored.  (Default: ${LIVY_HOME}/logs)\n            # - LIVY_PID_DIR    Where the pid file is stored. (Default: /tmp)\n            # - LIVY_SERVER_JAVA_OPTS  Java Opts for running livy server (You can set jvm related setting here, like jvm memory/gc algorithm and etc.)\n        export SPARK_HOME=/usr/hdp/current/spark2-client\n        export HADOOP_CONF_DIR=/etc/hadoop/conf\n        export LIVY_LOG_DIR={{livy_log_dir}}\n        export LIVY_PID_DIR={{livy_pid_dir}}\n        export LIVY_SERVER_JAVA_OPTS=\"-Xmx2g\"",
+      "livy2_pid_dir": "/var/run/livy2",
+      "livy2_log_dir": "/var/log/livy2",
+      "livy2_user": "livy"
+    },
+    "infra-solr-env": {
+      "infra_solr_znode": "/infra-solr",
+      "infra_solr_user": "solr",
+      "infra_solr_client_log_dir" :"/var/log/ambari-infra-solr-client"
+    },
+    "infra-solr-client-log4j" : {
+      "infra_solr_client_log_dir" : "/var/log/ambari-infra-solr-client",
+      "content" : "content"
+    },
+    "application-properties": {
+      "atlas.cluster.name" : "c2",
+      "atlas.rest.address": "http://c6401.ambari.apache.org:21000",
+      "atlas.graph.storage.backend": "berkeleyje",
+      "atlas.graph.storage.directory": "data/berkley",
+      "atlas.graph.index.search.backend": "solr5",
+      "atlas.graph.index.search.directory": "data/es",
+      "atlas.graph.index.search.elasticsearch.client-only": false,
+      "atlas.graph.index.search.elasticsearch.local-mode": true,
+      "atlas.lineage.hive.table.type.name": "Table",
+      "atlas.lineage.hive.column.type.name": "Column",
+      "atlas.lineage.hive.table.column.name": "columns",
+      "atlas.lineage.hive.process.type.name": "LoadProcess",
+      "atlas.lineage.hive.process.inputs.name": "inputTables",
+      "atlas.lineage.hive.process.outputs.name": "outputTables",
+      "atlas.enableTLS": false,
+      "atlas.authentication.method": "simple",
+      "atlas.authentication.principal": "atlas",
+      "atlas.authentication.keytab": "/etc/security/keytabs/atlas.service.keytab",
+      "atlas.http.authentication.enabled": false,
+      "atlas.http.authentication.type": "simple",
+      "atlas.http.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+      "atlas.http.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+      "atlas.http.authentication.kerberos.name.rules": "DEFAULT",
+      "atlas.server.http.port" : "21000",
+      "atlas.notification.embedded" : false,
+      "atlas.kafka.bootstrap.servers" : "c6401.ambari.apache.org:6667",
+      "atlas.kafka.data" : "/usr/hdp/current/atlas-server/data/kafka",
+      "atlas.kafka.entities.group.id" : "entities",
+      "atlas.kafka.hook.group.id" : "atlas",
+      "atlas.kafka.zookeeper.connect" : "c6401.ambari.apache.org:2181"
+    },
+    "atlas-env": {
+      "content": "# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java64_home}}\n# any additional java opts you want to set. This will apply to both client and server operations\nexport METADATA_OPTS={{metadata_opts}}\n# metadata configuration directory\nexport METADATA_CONF={{conf_dir}}\n# Where log files are stored. Defatult is logs directory under the base install location\nexport METADATA_LOG_DIR={{log_dir}}\n# additional classpath entries\nexport METADATACPPATH={{metadata_classpath}}\n# data dir\nexport METADATA_DATA_DIR={{data_dir}}\n# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.\nexport METADATA_EXPANDED_WEBAPP_DIR={{expanded_war_dir}}",
+      "metadata_user": "atlas",
+      "metadata_port": 21000,
+      "metadata_pid_dir": "/var/run/atlas",
+      "metadata_log_dir": "/var/log/atlas",
+      "metadata_data_dir": "/var/lib/atlas/data",
+      "metadata_expanded_war_dir": "/var/lib/atlas/server/webapp"
+    },
+    "atlas-log4j": {
+      "content": "<property><name>content</name><description>Custom log4j.properties</description><value></value></property>",
+      "atlas_log_level": "debug",
+      "audit_log_level": "OFF"
+    },
+    "atlas-solrconfig": {
+      "content": "<property><name>content</name><description>Custom solrconfig properties</description><value></value></property>"
+    },
+    "zeppelin-env": {
+      "zeppelin.server.kerberos.keytab": "",
+      "shiro_ini_content": "\n[users]\n# List of users with their password allowed to access Zeppelin.\n# To use a different strategy (LDAP / Database / ...) check the shiro doc at http://shiro.apache.org/configuration.html#Configuration-INISections\n#admin = password1\n#user1 = password2, role1, role2\n#user2 = password3, role3\n#user3 = password4, role2\n\n# Sample LDAP configuration, for user Authentication, currently tested for single Realm\n[main]\n#ldapRealm = org.apache.shiro.realm.ldap.JndiLdapRealm\n#ldapRealm.userDnTemplate = uid={0},cn=users,cn=accounts,dc=hortonworks,dc=com\n#ldapRealm.contextFactory.url = ldap://ldaphost:389\n#ldapRealm.contextFactory.authenticationMechanism = SIMPLE\n#sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager\n#securityManager.sessionManager = $sessionManager\n# 86,400,000 milliseconds = 24 hour\n#securityManager.sessionManager.globalSessionTimeout = 86400000\nshiro.loginUrl = /api/login\n\n[urls]\n# anon means the acce
 ss is anonymous.\n# authcBasic means Basic Auth Security\n# To enfore security, comment the line below and uncomment the next one\n/api/version = anon\n/** = anon\n#/** = authc",
+      "zeppelin.spark.jar.dir": "/apps/zeppelin",
+      "zeppelin.executor.mem": "512m",
+      "zeppelin_pid_dir": "/var/run/zeppelin",
+      "zeppelin.executor.instances": "2",
+      "log4j_properties_content": "\nlog4j.rootLogger = INFO, dailyfile\nlog4j.appender.stdout = org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout = org.apache.log4j.PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n\nlog4j.appender.dailyfile.DatePattern=.yyyy-MM-dd\nlog4j.appender.dailyfile.Threshold = INFO\nlog4j.appender.dailyfile = org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.dailyfile.File = ${zeppelin.log.file}\nlog4j.appender.dailyfile.layout = org.apache.log4j.PatternLayout\nlog4j.appender.dailyfile.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n",
+      "zeppelin.server.kerberos.principal": "",
+      "zeppelin_user": "zeppelin",
+      "zeppelin_env_content": "\n# Spark master url. eg. spark://master_addr:7077. Leave empty if you want to use local mode\nexport MASTER=yarn-client\nexport SPARK_YARN_JAR={{spark_jar}}\n\n\n# Where log files are stored.  PWD by default.\nexport ZEPPELIN_LOG_DIR={{zeppelin_log_dir}}\n\n# The pid files are stored. /tmp by default.\nexport ZEPPELIN_PID_DIR={{zeppelin_pid_dir}}\n\n\nexport JAVA_HOME={{java64_home}}\n\n# Additional jvm options. for example, export ZEPPELIN_JAVA_OPTS=\"-Dspark.executor.memory=8g -Dspark.cores.max=16\"\nexport ZEPPELIN_JAVA_OPTS=\"-Dhdp.version={{full_stack_version}} -Dspark.executor.memory={{executor_mem}} -Dspark.executor.instances={{executor_instances}} -Dspark.yarn.queue={{spark_queue}}\"\n\n\n# Zeppelin jvm mem options Default -Xmx1024m -XX:MaxPermSize=512m\n# export ZEPPELIN_MEM\n\n# zeppelin interpreter process jvm mem options. Defualt = ZEPPELIN_MEM\n# export ZEPPELIN_INTP_MEM\n\n# zeppelin interpreter process jvm options. Default = ZEPPELIN_JA
 VA_OPTS\n# export ZEPPELIN_INTP_JAVA_OPTS\n\n# Where notebook saved\n# export ZEPPELIN_NOTEBOOK_DIR\n\n# Id of notebook to be displayed in homescreen. ex) 2A94M5J1Z\n# export ZEPPELIN_NOTEBOOK_HOMESCREEN\n\n# hide homescreen notebook from list when this value set to \"true\". default \"false\"\n# export ZEPPELIN_NOTEBOOK_HOMESCREEN_HIDE\n\n# Bucket where notebook saved\n# export ZEPPELIN_NOTEBOOK_S3_BUCKET\n\n# User in bucket where notebook saved. For example bucket/user/notebook/2A94M5J1Z/note.json\n# export ZEPPELIN_NOTEBOOK_S3_USER\n\n# A string representing this instance of zeppelin. $USER by default\n# export ZEPPELIN_IDENT_STRING\n\n# The scheduling priority for daemons. Defaults to 0.\n# export ZEPPELIN_NICENESS\n\n\n#### Spark interpreter configuration ####\n\n## Use provided spark installation ##\n## defining SPARK_HOME makes Zeppelin run spark interpreter process using spark-submit\n##\n# (required) When it is defined, load it instead of Zeppelin embedded Spark libraries\n
 export SPARK_HOME={{spark_home}}\n\n# (optional) extra options to pass to spark submit. eg) \"--driver-memory 512M --executor-memory 1G\".\n# export SPARK_SUBMIT_OPTIONS\n\n## Use embedded spark binaries ##\n## without SPARK_HOME defined, Zeppelin still able to run spark interpreter process using embedded spark binaries.\n## however, it is not encouraged when you can define SPARK_HOME\n##\n# Options read in YARN client mode\n# yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR.\nexport HADOOP_CONF_DIR=/etc/hadoop/conf\n\n# Pyspark (supported with Spark 1.2.1 and above)\n# To configure pyspark, you need to set spark distribution's path to 'spark.home' property in Interpreter setting screen in Zeppelin GUI\n# path to the python command. must be the same path on the driver(Zeppelin) and all workers.\n# export PYSPARK_PYTHON\n\nexport PYTHONPATH=\"${SPARK_HOME}/python:${SPARK_HOME}/python/lib/py4j-0.8.2.1-src.zip\"\nexport SPARK_YARN_USER_ENV=\"PYTHONPATH=${PYTHONPAT
 H}\"\n\n## Spark interpreter options ##\n##\n# Use HiveContext instead of SQLContext if set true. true by default.\n# export ZEPPELIN_SPARK_USEHIVECONTEXT\n\n# Execute multiple SQL concurrently if set true. false by default.\n# export ZEPPELIN_SPARK_CONCURRENTSQL\n\n# Max number of SparkSQL result to display. 1000 by default.\n# export ZEPPELIN_SPARK_MAXRESULT",
+      "zeppelin_log_dir": "/var/log/zeppelin",
+      "zeppelin_group": "zeppelin"
+    },
+"zeppelin-config": {
+            "zeppelin.server.port": "9995",
+            "zeppelin.server.ssl.port": "9995",
+            "zeppelin.ssl.truststore.password": "change me",
+            "zeppelin.interpreters": "org.apache.zeppelin.spark.SparkInterpreter,org.apache.zeppelin.spark.PySparkInterpreter,org.apache.zeppelin.spark.SparkSqlInterpreter,org.apache.zeppelin.spark.DepInterpreter,org.apache.zeppelin.markdown.Markdown,org.apache.zeppelin.angular.AngularInterpreter,org.apache.zeppelin.shell.ShellInterpreter,org.apache.zeppelin.jdbc.JDBCInterpreter,org.apache.zeppelin.phoenix.PhoenixInterpreter,org.apache.zeppelin.livy.LivySparkInterpreter,org.apache.zeppelin.livy.LivyPySparkInterpreter,org.apache.zeppelin.livy.LivySparkRInterpreter,org.apache.zeppelin.livy.LivySparkSQLInterpreter",
+            "zeppelin.interpreter.group.order": "spark,angular,jdbc,livy,md,sh",
+            "zeppelin.ssl.truststore.path": "conf/truststore",
+            "zeppelin.notebook.dir": "notebook",
+            "zeppelin.ssl.keystore.password": "change me",
+            "zeppelin.ssl.keystore.path": "conf/keystore",
+            "zeppelin.server.addr": "0.0.0.0",
+            "zeppelin.ssl.client.auth": "false",
+            "zeppelin.notebook.homescreen": " ",
+            "zeppelin.interpreter.dir": "interpreter",
+            "zeppelin.ssl.keystore.type": "JKS",
+            "zeppelin.notebook.s3.user": "user",
+            "zeppelin.ssl.key.manager.password": "change me",
+            "zeppelin.anonymous.allowed": "true",
+            "zeppelin.ssl.truststore.type": "JKS",
+            "zeppelin.ssl": "false",
+            "zeppelin.notebook.storage": "org.apache.zeppelin.notebook.repo.VFSNotebookRepo",
+            "zeppelin.websocket.max.text.message.size": "1024000",
+            "zeppelin.interpreter.connect.timeout": "30000",
+            "zeppelin.notebook.s3.bucket": "zeppelin",
+            "zeppelin.notebook.homescreen.hide": "false",
+            "zeppelin.server.allowed.origins": "*"
+    },
+    "zoo.cfg": {
+      "clientPort": "2181"
+    },
+    "ranger-hbase-plugin-properties": {
+      "ranger-hbase-plugin-enabled":"yes"
+    },
+    "ranger-hive-plugin-properties": {
+      "ranger-hive-plugin-enabled":"yes"
+    },
+    "ranger-env": {
+      "xml_configurations_supported" : "true"
+    },
+    "tagsync-application-properties": {
+            "atlas.kafka.hook.group.id": "atlas",
+            "atlas.kafka.zookeeper.connect": "os-mv-31-dev-4.novalocal:2181",
+            "atlas.kafka.acks": "1",
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.kafka.data": "/usr/hdp/current/atlas-server/data/kafka",
+            "atlas.kafka.bootstrap.servers": "localhost:2181",
+            "atlas.notification.embedded": "false"
+    },
+    "ranger-tagsync-site": {
+            "ranger.tagsync.sink.impl.class": "org.apache.ranger.tagsync.sink.tagadmin.TagAdminRESTSink",
+            "ranger.tagsync.atlasrestsource.endpoint": "",
+            "ranger.tagsync.tagadmin.rest.ssl.config.file": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.filesource.filename": "/usr/hdp/current/ranger-tagsync/conf/etc/ranger/data/tags.json",
+            "ranger.tagsync.enabled": "true",
+            "ranger.tagsync.tagadmin.rest.url": "{{ranger_external_url}}",
+            "ranger.tagsync.atlasrestsource.download.interval": "",
+            "ranger.tagsync.filesource.modtime.check.interval": "60000",
+            "ranger.tagsync.tagadmin.password": "rangertagsync",
+            "ranger.tagsync.source.impl.class": "file",
+            "ranger.tagsync.source.atlas.custom.resource.mappers": "",
+            "ranger.tagsync.tagadmin.alias": "tagsync.tagadmin",
+            "ranger.tagsync.tagadmin.keystore": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.atlas.to.service.mapping": ""
+    },
+    "druid-env": {
+      "druid_log_dir" : "/var/log/druid",
+      "druid_pid_dir" : "/var/run/druid",
+      "content" : "#!/bin/bash\n # Set DRUID specific environment variables here.\n# The java implementation to use\nexport JAVA_HOME={{java8_home}}\nexport PATH=$PATH:$JAVA_HOME/bin\nexport DRUID_PID_DIR={{druid_pid_dir}}\nexport DRUID_LOG_DIR={{druid_log_dir}}\nexport DRUID_CONF_DIR={{druid_conf_dir}}\nexport DRUID_LIB_DIR={{druid_home}}/lib",
+      "druid.coordinator.jvm.heap.memory" : 1024,
+      "druid.coordinator.jvm.direct.memory": 2048,
+      "druid.coordinator.jvm.opts": "-Duser.timezone=UTC -Dfile.encoding=UTF-8",
+      "druid.broker.jvm.heap.memory" : 1024,
+      "druid.broker.jvm.direct.memory": 2048,
+      "druid.broker.jvm.opts": "-Duser.timezone=UTC -Dfile.encoding=UTF-8",
+      "druid.middlemanager.jvm.heap.memory" : 1024,
+      "druid.middlemanager.jvm.direct.memory": 2048,
+      "druid.middlemanager.jvm.opts": "-Duser.timezone=UTC -Dfile.encoding=UTF-8",
+      "druid.historical.jvm.heap.memory" : 1024,
+      "druid.historical.jvm.direct.memory": 2048,
+      "druid.historical.jvm.opts": "-Duser.timezone=UTC -Dfile.encoding=UTF-8",
+      "druid.overlord.jvm.heap.memory" : 1024,
+      "druid.overlord.jvm.direct.memory": 2048,
+      "druid.overlord.jvm.opts": "-Duser.timezone=UTC -Dfile.encoding=UTF-8",
+      "druid.router.jvm.heap.memory" : 1024,
+      "druid.router.jvm.direct.memory": 2048,
+      "druid.router.jvm.opts": "-Duser.timezone=UTC -Dfile.encoding=UTF-8",
+      "druid_user": "druid"
+    },
+    "druid-common" : {
+      "druid.metadata.storage.type" : "mysql",
+      "druid.metadata.storage.connector.connectURI" : "jdbc:mysql://my-db-host:3306/druid?createDatabaseIfNotExist=true",
+      "druid.metadata.storage.connector.user" : "druid",
+      "druid.metadata.storage.connector.password" : "diurd",
+      "druid.storage.type" : "hdfs",
+      "druid.storage.storageDirectory" : "/user/druid/data",
+      "druid.indexer.logs.type": "hdfs",
+      "druid.indexer.logs.directory": "/user/druid/logs",
+      "druid.extensions.pullList": "[\"custom-druid-extension\"]",
+      "druid.extensions.repositoryList": "[\"http://custom-mvn-repo/public/release\"]",
+      "druid.extensions.loadList": "[\"mysql-metadata-storage\", \"druid-datasketches\"]",
+      "druid.security.extensions.loadList": "[\"druid-kerberos\"]"
+    },
+    "druid-historical" : {
+      "druid.segmentCache.infoDir" : "/apps/druid/segmentCache/info_dir",
+      "druid.segmentCache.locations" :"[{\"path\":\"/apps/druid/segmentCache\",\"maxSize\":300000000000}]"
+    },
+    "druid-coordinator" : {
+      "druid.service" : "druid/coordinator"
+    },
+    "druid-overlord" : {
+      "druid.service" : "druid/overlord"
+    },
+    "druid-broker" : {
+      "druid.service" : "druid/broker"
+    },
+    "druid-middlemanager" : {
+      "druid.service" : "druid/middlemanager",
+      "druid.indexer.task.hadoopWorkingPath" : "/tmp/druid-indexing",
+      "druid.indexer.task.baseTaskDir" : "/apps/druid/tasks"
+    },
+    "druid-router" : {
+      "druid.service" : "druid/router"
+    },
+    "druid-log4j" : {
+      "content" : "<![CDATA[<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!--\n  ~ Licensed to the Apache Software Foundation (ASF) under one\n  ~ or more contributor license agreements.  See the NOTICE file\n  ~ distributed with this work for additional information\n  ~ regarding copyright ownership.  The ASF licenses this file\n  ~ to you under the Apache License, Version 2.0 (the\n  ~ \"License\"); you may not use this file except in compliance\n  ~ with the License.  You may obtain a copy of the License at\n  ~\n  ~     http://www.apache.org/licenses/LICENSE-2.0\n  ~\n  ~ Unless required by applicable law or agreed to in writing, software\n  ~ distributed under the License is distributed on an \"AS IS\" BASIS,\n  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n  ~ See the License for the specific language governing permissions and\n  ~ limitations under the License.\n  -->\n    <Configuration>\n        <Appenders>\n            <Console name=\"Console\
 " target=\"SYSTEM_OUT\">\n                <PatternLayout pattern=\"%d{ISO8601} %p [%t] %c - %m%n\"/>\n            </Console>\n        </Appenders>\n        <Loggers>\n            <Logger name=\"com.metamx\" level=\"{{metamx_log_level}}\"/>\n            <Logger name=\"io.druid\" level=\"{{druid_log_level}}\"/>\n            <Root level=\"{{root_log_level}}\">\n                <AppenderRef ref=\"Console\"/>\n            </Root>\n        </Loggers>\n    </Configuration>\n      ]]>\n"
+    },
+    "druid-logrotate" : {
+      "content" : "<![CDATA[\n    {{druid_log_dir}}/*.log {\n        copytruncate\n        rotate 7\n        daily\n        nocompress\n        missingok\n        notifempty\n        create 660 druid users\n        dateext\n        dateformat -%Y-%m-%d-%s\n        }\n      ]]>\n"
+    },
+    "superset" : {
+      "SUPERSET_DATABASE_TYPE" : "sqllite"
+    }
+  },
+  "configuration_attributes": {
+    "sqoop-site": {},
+    "yarn-site": {
+      "final": {
+        "yarn.nodemanager.disk-health-checker.min-healthy-disks": "true",
+        "yarn.nodemanager.container-executor.class": "true",
+        "yarn.nodemanager.local-dirs": "true"
+      }
+    },
+    "yarn-site": {
+      "final": {
+        "is_supported_yarn_ranger": "true"
+      }
+    },
+    "hdfs-site": {
+      "final": {
+        "dfs.web.ugi": "true",
+        "dfs.support.append": "true",
+        "dfs.cluster.administrators": "true"
+      }
+    },
+    "core-site": {
+      "final": {
+        "hadoop.proxyuser.hive.groups": "true",
+        "webinterface.private.actions": "true",
+        "hadoop.proxyuser.oozie.hosts": "true"
+      }
+    },
+    "knox-env": {},
+    "gateway-site": {},
+    "users-ldif": {},
+    "kafka-env": {},
+    "kafka-log4j": {},
+    "kafka-broker": {},
+    "metadata-env": {},
+    "atlas-hbase-site": {},
+    "tagsync-application-properties": {},
+    "ranger-tagsync-site": {}
+  },
+  "configurationTags": {
+    "slider-client": {
+      "tag": "version1"
+    },
+    "slider-log4j": {
+      "tag": "version1"
+    },
+    "slider-env": {
+      "tag": "version1"
+    },
+    "core-site": {
+      "tag": "version1"
+    },
+    "hdfs-site": {
+      "tag": "version1"
+    },
+    "yarn-site": {
+      "tag": "version1"
+    },
+    "gateway-site": {
+      "tag": "version1"
+    },
+    "topology": {
+      "tag": "version1"
+    },
+    "users-ldif": {
+      "tag": "version1"
+    },
+    "kafka-env": {
+      "tag": "version1"
+    },
+    "kafka-log4j": {
+      "tag": "version1"
+    },
+    "kafka-broker": {
+      "tag": "version1"
+    },
+    "metadata-env": {
+      "tag": "version1"
+    },
+    "tagsync-application-properties": {
+      "tag": "version1"
+    },
+    "ranger-tagsync-site": {
+      "tag": "version1"
+    }
+  },
+  "commandId": "7-1",
+  "clusterHostInfo": {
+    "ambari_server_host": [
+      "c6401.ambari.apache.org"
+    ],
+    "all_ping_ports": [
+      "8670",
+      "8670"
+    ],
+    "rm_host": [
+      "c6402.ambari.apache.org"
+    ],
+    "all_hosts": [
+      "c6401.ambari.apache.org",
+      "c6402.ambari.apache.org"
+    ],
+    "knox_gateway_hosts": [
+      "jaimin-knox-1.c.pramod-thangali.internal"
+    ],
+    "kafka_broker_hosts": [
+      "c6401.ambari.apache.org"
+    ],
+    "infra_solr_hosts": [
+      "c6401.ambari.apache.org"
+    ],
+    "zookeeper_hosts": [
+      "c6401.ambari.apache.org"
+    ],
+    "ranger_tagsync_hosts": [
+      "c6401.ambari.apache.org"
+    ],
+    "atlas_server_hosts": [
+      "c6401.ambari.apache.org"
+    ],
+    "zeppelin_master_hosts": [
+      "c6401.ambari.apache.org"
+    ]
+  }
+}