Posted to commits@ambari.apache.org by tb...@apache.org on 2016/03/23 23:49:24 UTC

ambari git commit: AMBARI-15431 - Atlas Integration : Rename Atlas Configurations - fix for 2.5

Repository: ambari
Updated Branches:
  refs/heads/trunk 1fbf7f15d -> c5c5da75a


AMBARI-15431 - Atlas Integration : Rename Atlas Configurations - fix for 2.5


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c5c5da75
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c5c5da75
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c5c5da75

Branch: refs/heads/trunk
Commit: c5c5da75af617637ea91a29528ac9cc8f6113ab1
Parents: 1fbf7f1
Author: tbeerbower <tb...@hortonworks.com>
Authored: Wed Mar 23 18:48:58 2016 -0400
Committer: tbeerbower <tb...@hortonworks.com>
Committed: Wed Mar 23 18:49:17 2016 -0400

----------------------------------------------------------------------
 .../services/ATLAS/configuration/atlas-env.xml  | 114 +++++++
 .../stacks/HDP/2.5/services/ATLAS/metainfo.xml  |  19 ++
 .../services/ATLAS/configuration/atlas-env.xml  | 114 -------
 .../stacks/HDP/2.6/services/ATLAS/metainfo.xml  |  19 --
 .../stacks/2.5/ATLAS/test_atlas_server.py       | 107 ++++++
 .../test/python/stacks/2.5/configs/default.json | 329 +++++++++++++++++++
 .../stacks/2.6/ATLAS/test_atlas_server.py       | 107 ------
 .../test/python/stacks/2.6/configs/default.json | 329 -------------------
 8 files changed, 569 insertions(+), 569 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c5c5da75/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/atlas-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/atlas-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/atlas-env.xml
new file mode 100644
index 0000000..42503b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/atlas-env.xml
@@ -0,0 +1,114 @@
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property require-input="false">
+    <name>metadata_log_dir</name>
+    <value>/var/log/atlas</value>
+    <description>Atlas log directory.</description>
+  </property>
+  <property require-input="false">
+    <name>metadata_pid_dir</name>
+    <value>/var/run/atlas</value>
+    <description>Atlas pid-file directory.</description>
+  </property>
+  <property>
+    <name>metadata_user</name>
+    <display-name>Metadata User</display-name>
+    <value>atlas</value>
+    <property-type>USER</property-type>
+    <description>Metadata User Name.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>metadata_opts</name>
+    <value>-Xmx1024m -Dlog4j.configuration=atlas-log4j.xml</value>
+    <description>Metadata Server command line options.</description>
+  </property>
+  <property>
+    <name>metadata_classpath</name>
+    <value> </value>
+    <description>Metadata Server additional classpath.</description>
+  </property>
+  <property require-input="false">
+    <name>metadata_data_dir</name>
+    <value>/var/lib/atlas/data</value>
+    <description>Atlas data directory.</description>
+  </property>
+  <property require-input="false">
+    <name>metadata_expanded_war_dir</name>
+    <value>./server/webapp</value>
+    <description>Atlas expanded WAR directory.</description>
+  </property>
+  <property>
+    <name>metadata_conf_file</name>
+    <value>atlas-application.properties</value>
+    <description>Atlas configuration file</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+  </property>
+
+  <!-- metadata-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the Jinja template for the metadata-env.sh file</description>
+    <value>
+      # The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path
+      export JAVA_HOME={{java64_home}}
+
+      # any additional java opts you want to set. This will apply to both client and server operations
+      {% if security_enabled %}
+      export ATLAS_OPTS="{{metadata_opts}} -Djava.security.auth.login.config={{atlas_jaas_file}}"
+      {% else %}
+      export ATLAS_OPTS="{{metadata_opts}}"
+      {% endif %}
+
+      # metadata configuration directory
+      export ATLAS_CONF={{conf_dir}}
+
+      # Where log files are stored. Default is the logs directory under the base install location
+      export ATLAS_LOG_DIR={{log_dir}}
+
+      # additional classpath entries
+      export ATLASCPPATH={{metadata_classpath}}
+
+      # data dir
+      export ATLAS_DATA_DIR={{data_dir}}
+
+      # pid dir
+      export ATLAS_PID_DIR={{pid_dir}}
+
+      # hbase conf dir
+      export HBASE_CONF_DIR=/etc/hbase/conf
+
+      # Where do you want to expand the war file. By default it is in the /server/webapp dir under the base install dir.
+      export ATLAS_EXPANDED_WEBAPP_DIR={{expanded_war_dir}}
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+  </property>
+</configuration>
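
For reference, the content property above is a Jinja template that gets rendered into atlas-env.sh (the test added below exercises this through Ambari's InlineTemplate). A minimal standalone sketch of the security_enabled branch, using plain jinja2 with illustrative variable values:

import jinja2

# Excerpt of the template above; only the ATLAS_OPTS branch is shown.
TEMPLATE = '''\
{% if security_enabled %}
export ATLAS_OPTS="{{metadata_opts}} -Djava.security.auth.login.config={{atlas_jaas_file}}"
{% else %}
export ATLAS_OPTS="{{metadata_opts}}"
{% endif %}
'''

rendered = jinja2.Template(TEMPLATE).render(
    security_enabled=False,  # illustrative; a kerberized cluster would render the JAAS variant
    metadata_opts='-Xmx1024m -Dlog4j.configuration=atlas-log4j.xml',
    atlas_jaas_file='/etc/atlas/conf/atlas_jaas.conf',
)
print(rendered.strip())
# export ATLAS_OPTS="-Xmx1024m -Dlog4j.configuration=atlas-log4j.xml"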

http://git-wip-us.apache.org/repos/asf/ambari/blob/c5c5da75/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/metainfo.xml
index 66aea9d..7061d6b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/metainfo.xml
@@ -21,6 +21,25 @@
     <service>
       <name>ATLAS</name>
       <version>0.5.0.2.5</version>
+      <components>
+        <component>
+          <name>ATLAS_SERVER</name>
+          <configFile>
+            <type>properties</type>
+            <fileName>atlas-application.properties</fileName>
+            <dictionaryName>application-properties</dictionaryName>
+          </configFile>
+        </component>
+
+        <component>
+          <name>ATLAS_CLIENT</name>
+          <configFile>
+            <type>properties</type>
+            <fileName>atlas-application.properties</fileName>
+            <dictionaryName>application-properties</dictionaryName>
+          </configFile>
+        </component>
+      </components>
     </service>
   </services>
 </metainfo>
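
The configFile entries above declare that both ATLAS_SERVER and ATLAS_CLIENT receive atlas-application.properties, generated from the application-properties configuration dictionary (the same dictionary the test fixture below populates). A rough sketch of that dictionary-to-file mapping, with a hypothetical write_properties() standing in for Ambari's PropertiesFile resource:

def write_properties(path, props):
    # Hypothetical stand-in for Ambari's PropertiesFile resource: serialize
    # the config dictionary as key=value lines, sorted for stable output.
    with open(path, 'w') as f:
        for key in sorted(props):
            f.write('%s=%s\n' % (key, props[key]))

app_props = {
    'atlas.server.http.port': '21000',
    'atlas.authentication.method': 'simple',
}
write_properties('atlas-application.properties', app_props)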

http://git-wip-us.apache.org/repos/asf/ambari/blob/c5c5da75/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/atlas-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/atlas-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/atlas-env.xml
deleted file mode 100644
index 42503b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/atlas-env.xml
+++ /dev/null
@@ -1,114 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property require-input="false">
-    <name>metadata_log_dir</name>
-    <value>/var/log/atlas</value>
-    <description>Atlas log directory.</description>
-  </property>
-  <property require-input="false">
-    <name>metadata_pid_dir</name>
-    <value>/var/run/atlas</value>
-    <description>Atlas pid-file directory.</description>
-  </property>
-  <property>
-    <name>metadata_user</name>
-    <display-name>Metadata User</display-name>
-    <value>atlas</value>
-    <property-type>USER</property-type>
-    <description>Metadata User Name.</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-  </property>
-  <property>
-    <name>metadata_opts</name>
-    <value>-Xmx1024m -Dlog4j.configuration=atlas-log4j.xml</value>
-    <description>Metadata Server command line options.</description>
-  </property>
-  <property>
-    <name>metadata_classpath</name>
-    <value> </value>
-    <description>Metadata Server additional classpath.</description>
-  </property>
-  <property require-input="false">
-    <name>metadata_data_dir</name>
-    <value>/var/lib/atlas/data</value>
-    <description>Atlas data directory.</description>
-  </property>
-  <property require-input="false">
-    <name>metadata_expanded_war_dir</name>
-    <value>./server/webapp</value>
-    <description>Atlas expanded WAR directory.</description>
-  </property>
-  <property>
-    <name>metadata_conf_file</name>
-    <value>atlas-application.properties</value>
-    <description>Atlas configuration file</description>
-    <value-attributes>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-  </property>
-
-  <!-- metadata-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the Jinja template for the metadata-env.sh file</description>
-    <value>
-      # The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path
-      export JAVA_HOME={{java64_home}}
-
-      # any additional java opts you want to set. This will apply to both client and server operations
-      {% if security_enabled %}
-      export ATLAS_OPTS="{{metadata_opts}} -Djava.security.auth.login.config={{atlas_jaas_file}}"
-      {% else %}
-      export ATLAS_OPTS="{{metadata_opts}}"
-      {% endif %}
-
-      # metadata configuration directory
-      export ATLAS_CONF={{conf_dir}}
-
-      # Where log files are stored. Default is the logs directory under the base install location
-      export ATLAS_LOG_DIR={{log_dir}}
-
-      # additional classpath entries
-      export ATLASCPPATH={{metadata_classpath}}
-
-      # data dir
-      export ATLAS_DATA_DIR={{data_dir}}
-
-      # pid dir
-      export ATLAS_PID_DIR={{pid_dir}}
-
-      # hbase conf dir
-      export HBASE_CONF_DIR=/etc/hbase/conf
-
-      # Where do you want to expand the war file. By default it is in the /server/webapp dir under the base install dir.
-      export ATLAS_EXPANDED_WEBAPP_DIR={{expanded_war_dir}}
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c5c5da75/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/metainfo.xml
index af1a047..9514576 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/metainfo.xml
@@ -21,25 +21,6 @@
     <service>
       <name>ATLAS</name>
       <version>0.5.0.2.6</version>
-      <components>
-        <component>
-          <name>ATLAS_SERVER</name>
-          <configFile>
-            <type>properties</type>
-            <fileName>atlas-application.properties</fileName>
-            <dictionaryName>application-properties</dictionaryName>
-          </configFile>
-        </component>
-
-        <component>
-          <name>ATLAS_CLIENT</name>
-          <configFile>
-            <type>properties</type>
-            <fileName>atlas-application.properties</fileName>
-            <dictionaryName>application-properties</dictionaryName>
-          </configFile>
-        </component>
-      </components>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c5c5da75/ambari-server/src/test/python/stacks/2.5/ATLAS/test_atlas_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/ATLAS/test_atlas_server.py b/ambari-server/src/test/python/stacks/2.5/ATLAS/test_atlas_server.py
new file mode 100644
index 0000000..21d2907
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.5/ATLAS/test_atlas_server.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from stacks.utils.RMFTestCase import *
+
+from only_for_platform import not_for_platform, PLATFORM_WINDOWS
+
+@not_for_platform(PLATFORM_WINDOWS)
+class TestAtlasServer(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "ATLAS/0.1.0.2.3/package"
+  STACK_VERSION = "2.5"
+
+  def configureResourcesCalled(self):
+    self.assertResourceCalled('Directory', '/var/run/atlas',
+                              owner='atlas',
+                              group='hadoop',
+                              create_parents = True,
+                              cd_access='a',
+                              mode=0755
+                              )
+    self.assertResourceCalled('Directory', '/etc/atlas/conf',
+                              owner='atlas',
+                              group='hadoop',
+                              create_parents = True,
+                              cd_access='a',
+                              mode=0755
+                              )
+    self.assertResourceCalled('Directory', '/var/log/atlas',
+                              owner='atlas',
+                              group='hadoop',
+                              create_parents = True,
+                              cd_access='a',
+                              mode=0755
+                              )
+    self.assertResourceCalled('Directory', '/var/lib/atlas/data',
+                              owner='atlas',
+                              group='hadoop',
+                              create_parents = True,
+                              cd_access='a',
+                              mode=0644
+                              )
+    self.assertResourceCalled('Directory', '/var/lib/atlas/server/webapp',
+                              owner='atlas',
+                              group='hadoop',
+                              create_parents = True,
+                              cd_access='a',
+                              mode=0644
+                              )
+    self.assertResourceCalled('File', '/var/lib/atlas/server/webapp/atlas.war',
+                              content = StaticFile('/usr/hdp/current/atlas-server/server/webapp/atlas.war'),
+                              )
+    appprops = dict(
+        self.getConfig()['configurations']['application-properties'])
+    appprops['atlas.http.authentication.kerberos.name.rules'] = ' \\ \n'.join(appprops['atlas.http.authentication.kerberos.name.rules'].splitlines())
+    appprops['atlas.server.bind.address'] = 'c6401.ambari.apache.org'
+
+    self.assertResourceCalled('PropertiesFile',
+                              '/etc/atlas/conf/atlas-application.properties',
+                              properties=appprops,
+                              owner='atlas',
+                              group='hadoop',
+                              mode=0644,
+                              )
+    self.assertResourceCalled('File', '/etc/atlas/conf/atlas-env.sh',
+                              content=InlineTemplate(
+                                  self.getConfig()['configurations'][
+                                    'atlas-env']['content']),
+                              owner='atlas',
+                              group='hadoop',
+                              mode=0755,
+                              )
+    self.assertResourceCalled('File', '/etc/atlas/conf/atlas-log4j.xml',
+                              content=StaticFile('atlas-log4j.xml'),
+                              owner='atlas',
+                              group='hadoop',
+                              mode=0644,
+                              )
+
+  def test_configure_default(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
+                       classname = "MetadataServer",
+                       command = "configure",
+                       config_file="default.json",
+                       stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+
+    self.configureResourcesCalled()
+    self.assertNoMoreResources()
+
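
A note on the kerberos name.rules handling in configureResourcesCalled() above: the property may hold several rules separated by newlines, and the join rewrites them with backslash continuations so the value survives as a single .properties entry. A standalone illustration (this multi-line rule is made up; the default.json fixture below carries the single-line value "DEFAULT", for which the join is a no-op):

rules = 'RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT'
joined = ' \\ \n'.join(rules.splitlines())
print(joined)
# Prints the first rule, a " \" line continuation, then DEFAULT on the next line.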

http://git-wip-us.apache.org/repos/asf/ambari/blob/c5c5da75/ambari-server/src/test/python/stacks/2.5/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/default.json b/ambari-server/src/test/python/stacks/2.5/configs/default.json
new file mode 100644
index 0000000..2e1bc68
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.5/configs/default.json
@@ -0,0 +1,329 @@
+{
+  "roleCommand": "SERVICE_CHECK",
+  "clusterName": "c1",
+  "hostname": "c6401.ambari.apache.org",
+  "hostLevelParams": {
+    "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+    "agent_stack_retry_count": "5",
+    "agent_stack_retry_on_unavailability": "false",
+    "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+    "ambari_db_rca_password": "mapred",
+    "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+    "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+    "stack_version": "2.3",
+    "stack_name": "HDP",
+    "ambari_db_rca_driver": "org.postgresql.Driver",
+    "jdk_name": "jdk-7u67-linux-x64.tar.gz",
+    "ambari_db_rca_username": "mapred",
+    "java_home": "/usr/jdk64/jdk1.7.0_45",
+    "db_name": "ambari"
+  },
+  "commandType": "EXECUTION_COMMAND",
+  "roleParams": {},
+  "serviceName": "SLIDER",
+  "role": "SLIDER",
+  "commandParams": {
+    "version": "2.2.1.0-2067",
+    "command_timeout": "300",
+    "service_package_folder": "OOZIE",
+    "script_type": "PYTHON",
+    "script": "scripts/service_check.py",
+    "excluded_hosts": "host1,host2"
+  },
+  "taskId": 152,
+  "public_hostname": "c6401.ambari.apache.org",
+  "configurations": {
+    "slider-client": {
+      "slider.yarn.queue": "default"
+    },
+    "sqoop-site": {
+      "atlas.cluster.name": "c1",
+      "sqoop.job.data.publish.class": "org.apache.atlas.sqoop.hook.SqoopHook"
+    },
+    "mahout-env": {
+      "mahout_user": "mahout"
+    },
+    "yarn-env": {
+      "yarn_user": "yarn"
+    },
+    "mahout-log4j": {
+      "content": "\n            #\n            #\n            # Licensed to the Apache Software Foundation (ASF) under one\n            # or more contributor license agreements.  See the NOTICE file\n            # distributed with this work for additional information\n            # regarding copyright ownership.  The ASF licenses this file\n            # to you under the Apache License, Version 2.0 (the\n            # \"License\"); you may not use this file except in compliance\n            # with the License.  You may obtain a copy of the License at\n            #\n            #   http://www.apache.org/licenses/LICENSE-2.0\n            #\n            # Unless required by applicable law or agreed to in writing,\n            # software distributed under the License is distributed on an\n            # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n            # KIND, either express or implied.  See the License for the\n            # specific language governing permissions a
 nd limitations\n            # under the License.\n            #\n            #\n            #\n\n            # Set everything to be logged to the console\n            log4j.rootCategory=WARN, console\n            log4j.appender.console=org.apache.log4j.ConsoleAppender\n            log4j.appender.console.target=System.err\n            log4j.appender.console.layout=org.apache.log4j.PatternLayout\n            log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n            # Settings to quiet third party logs that are too verbose\n            log4j.logger.org.eclipse.jetty=WARN\n            log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=WARN\n            log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=WARN"
+    },
+    "hadoop-env": {
+      "hdfs_user": "hdfs"
+    },
+    "core-site": {
+      "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
+    },
+    "hdfs-site": {
+      "a": "b"
+    },
+    "yarn-site": {
+      "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
+      "yarn.resourcemanager.address": "c6401.ambari.apache.org:8050",
+      "yarn.resourcemanager.scheduler.address": "c6401.ambari.apache.org:8030"
+    },
+    "cluster-env": {
+      "managed_hdfs_resource_property_names": "",
+      "security_enabled": "false",
+      "ignore_groupsusers_create": "false",
+      "smokeuser": "ambari-qa",
+      "kerberos_domain": "EXAMPLE.COM",
+      "user_group": "hadoop"
+    },
+    "webhcat-site": {
+      "templeton.jar": "/usr/hdp/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar",
+      "templeton.pig.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/pig/pig.tar.gz",
+      "templeton.hive.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/hive/hive.tar.gz",
+      "templeton.sqoop.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/sqoop/sqoop.tar.gz",
+      "templeton.streaming.jar": "hdfs:///hdp/apps/{{ hdp_stack_version }}/mr/hadoop-streaming.jar"
+    },
+    "slider-log4j": {
+      "content": "log4jproperties\nline2"
+    },
+    "slider-env": {
+      "content": "envproperties\nline2"
+    },
+    "gateway-site": {
+      "java.security.auth.login.config": "/etc/knox/conf/krb5JAASLogin.conf",
+      "gateway.hadoop.kerberos.secured": "false",
+      "gateway.gateway.conf.dir": "deployments",
+      "gateway.path": "gateway",
+      "sun.security.krb5.debug": "true",
+      "java.security.krb5.conf": "/etc/knox/conf/krb5.conf",
+      "gateway.port": "8443"
+    },
+
+    "users-ldif": {
+      "content": "\n            # Licensed to the Apache Software Foundation (ASF) under one\n            # or more contributor license agreements.  See the NOTICE file\n            # distributed with this work for additional information\n            # regarding copyright ownership.  The ASF licenses this file\n            # to you under the Apache License, Version 2.0 (the\n            # \"License\"); you may not use this file except in compliance\n            # with the License.  You may obtain a copy of the License at\n            #\n            #     http://www.apache.org/licenses/LICENSE-2.0\n            #\n            # Unless required by applicable law or agreed to in writing, software\n            # distributed under the License is distributed on an \"AS IS\" BASIS,\n            # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n            # See the License for the specific language governing permissions and\n            # limitations under the Lice
 nse.\n\n            version: 1\n\n            # Please replace with site specific values\n            dn: dc=hadoop,dc=apache,dc=org\n            objectclass: organization\n            objectclass: dcObject\n            o: Hadoop\n            dc: hadoop\n\n            # Entry for a sample people container\n            # Please replace with site specific values\n            dn: ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:organizationalUnit\n            ou: people\n\n            # Entry for a sample end user\n            # Please replace with site specific values\n            dn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: Guest\n            sn: User\n            uid: guest\n            userPassword:guest-password\n\n            # entry for sample user admin\n            dn: 
 uid=admin,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: Admin\n            sn: Admin\n            uid: admin\n            userPassword:admin-password\n\n            # entry for sample user sam\n            dn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: sam\n            sn: sam\n            uid: sam\n            userPassword:sam-password\n\n            # entry for sample user tom\n            dn: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: tom\n            sn: tom\n            uid: tom\n            userPasswor
 d:tom-password\n\n            # create FIRST Level groups branch\n            dn: ou=groups,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:organizationalUnit\n            ou: groups\n            description: generic groups branch\n\n            # create the analyst group under groups\n            dn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass: groupofnames\n            cn: analyst\n            description:analyst  group\n            member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n            member: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n\n\n            # create the scientist group under groups\n            dn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass: groupofnames\n            cn: scientist\n            description: scientist group\n            member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org"
+    },
+
+    "topology": {
+      "content": "\n        <topology>\n\n            <gateway>\n\n                <provider>\n                    <role>authentication</role>\n                    <name>ShiroProvider</name>\n                    <enabled>true</enabled>\n                    <param>\n                        <name>sessionTimeout</name>\n                        <value>30</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm</name>\n                        <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm.userDnTemplate</name>\n                        <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm.contextFactory.url</name>\n                        <value>ldap://{{knox_host_name}}:33389</value>\n                 
    </param>\n                    <param>\n                        <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n                        <value>simple</value>\n                    </param>\n                    <param>\n                        <name>urls./**</name>\n                        <value>authcBasic</value>\n                    </param>\n                </provider>\n\n                <provider>\n                    <role>identity-assertion</role>\n                    <name>Default</name>\n                    <enabled>true</enabled>\n                </provider>\n\n            </gateway>\n\n            <service>\n                <role>NAMENODE</role>\n                <url>hdfs://{{namenode_host}}:{{namenode_rpc_port}}</url>\n            </service>\n\n            <service>\n                <role>JOBTRACKER</role>\n                <url>rpc://{{rm_host}}:{{jt_rpc_port}}</url>\n            </service>\n\n            <service>\n                <role>WEBHDFS</role
 >\n                <url>http://{{namenode_host}}:{{namenode_http_port}}/webhdfs</url>\n            </service>\n\n            <service>\n                <role>WEBHCAT</role>\n                <url>http://{{webhcat_server_host}}:{{templeton_port}}/templeton</url>\n            </service>\n\n            <service>\n                <role>OOZIE</role>\n                <url>http://{{oozie_server_host}}:{{oozie_server_port}}/oozie</url>\n            </service>\n\n            <service>\n                <role>WEBHBASE</role>\n                <url>http://{{hbase_master_host}}:{{hbase_master_port}}</url>\n            </service>\n\n            <service>\n                <role>HIVE</role>\n                <url>http://{{hive_server_host}}:{{hive_http_port}}/{{hive_http_path}}</url>\n            </service>\n\n            <service>\n                <role>RESOURCEMANAGER</role>\n                <url>http://{{rm_host}}:{{rm_port}}/ws</url>\n            </service>\n        </topology>"
+    },
+
+    "ldap-log4j": {
+      "content": "\n        # Licensed to the Apache Software Foundation (ASF) under one\n        # or more contributor license agreements.  See the NOTICE file\n        # distributed with this work for additional information\n        # regarding copyright ownership.  The ASF licenses this file\n        # to you under the Apache License, Version 2.0 (the\n        # \"License\"); you may not use this file except in compliance\n        # with the License.  You may obtain a copy of the License at\n        #\n        #     http://www.apache.org/licenses/LICENSE-2.0\n        #\n        # Unless required by applicable law or agreed to in writing, software\n        # distributed under the License is distributed on an \"AS IS\" BASIS,\n        # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n        # See the License for the specific language governing permissions and\n        # limitations under the License.\n        #testing\n\n        app.log.dir=${launcher.dir
 }/../logs\n        app.log.file=${launcher.name}.log\n\n        log4j.rootLogger=ERROR, drfa\n        log4j.logger.org.apache.directory.server.ldap.LdapServer=INFO\n        log4j.logger.org.apache.directory=WARN\n\n        log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n        log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n        log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n        log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n        log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n        log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n        log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n        log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n"
+    },
+
+    "gateway-log4j": {
+      "content": "\n\n      # Licensed to the Apache Software Foundation (ASF) under one\n      # or more contributor license agreements. See the NOTICE file\n      # distributed with this work for additional information\n      # regarding copyright ownership. The ASF licenses this file\n      # to you under the Apache License, Version 2.0 (the\n      # \"License\"); you may not use this file except in compliance\n      # with the License. You may obtain a copy of the License at\n      #\n      # http://www.apache.org/licenses/LICENSE-2.0\n      #\n      # Unless required by applicable law or agreed to in writing, software\n      # distributed under the License is distributed on an \"AS IS\" BASIS,\n      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n      # See the License for the specific language governing permissions and\n      # limitations under the License.\n\n      app.log.dir=${launcher.dir}/../logs\n      app.log.file=${launcher.name}.log\n   
    app.audit.file=${launcher.name}-audit.log\n\n      log4j.rootLogger=ERROR, drfa\n\n      log4j.logger.org.apache.hadoop.gateway=INFO\n      #log4j.logger.org.apache.hadoop.gateway=DEBUG\n\n      #log4j.logger.org.eclipse.jetty=DEBUG\n      #log4j.logger.org.apache.shiro=DEBUG\n      #log4j.logger.org.apache.http=DEBUG\n      #log4j.logger.org.apache.http.client=DEBUG\n      #log4j.logger.org.apache.http.headers=DEBUG\n      #log4j.logger.org.apache.http.wire=DEBUG\n\n      log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n      log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n      log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n      log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n      log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n      log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n      log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n      log4j.appender.drfa.layout.ConversionPattern=%
 d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n      log4j.logger.audit=INFO, auditfile\n      log4j.appender.auditfile=org.apache.log4j.DailyRollingFileAppender\n      log4j.appender.auditfile.File=${app.log.dir}/${app.audit.file}\n      log4j.appender.auditfile.Append = true\n      log4j.appender.auditfile.DatePattern = '.'yyyy-MM-dd\n      log4j.appender.auditfile.layout = org.apache.hadoop.gateway.audit.log4j.layout.AuditLayout"
+    },
+    "knox-env": {
+      "knox_master_secret": "sa",
+      "knox_group": "knox",
+      "knox_pid_dir": "/var/run/knox",
+      "knox_user": "knox"
+    },
+    "kafka-env": {
+      "content": "\n#!/bin/bash\n\n# Set KAFKA specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\nexport PATH=$PATH:$JAVA_HOME/bin",
+      "kafka_user": "kafka",
+      "kafka_log_dir": "/var/log/kafka",
+      "kafka_pid_dir": "/var/run/kafka"
+    },
+    "kafka-log4j": {
+      "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\nkafka.logs.dir=logs\n\nlog4j.rootLogger=INFO, stdout\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org.apache.log4j
 .PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log\nlog4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log\nlog4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-reques
 t.log\nlog4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log\nlog4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log\nlog4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\n# Turn on all our debugging info\n#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender\n#log4j.logge
 r.kafka.client.ClientUtils=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender\n#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG\nlog4j.logger.kafka=INFO, kafkaAppender\nlog4j.logger.kafka.network.RequestChannel$=WARN, requestAppender\nlog4j.additivity.kafka.network.RequestChannel$=false\n\n#log4j.logger.kafka.network.Processor=TRACE, requestAppender\n#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender\n#log4j.additivity.kafka.server.KafkaApis=false\nlog4j.logger.kafka.request.logger=WARN, requestAppender\nlog4j.additivity.kafka.request.logger=false\n\nlog4j.logger.kafka.controller=TRACE, controllerAppender\nlog4j.additivity.kafka.controller=false\n\nlog4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender\nlog4j.additivity.kafka.log.LogCleaner=false\n\nlog4j.logger.state.change.logger=TRACE, stateChangeAppender\nlog4j.additivity.state.change.logger=false"
+    },
+    "kafka-broker": {
+      "log.segment.bytes": "1073741824",
+      "socket.send.buffer.bytes": "102400",
+      "num.network.threads": "3",
+      "log.flush.scheduler.interval.ms": "3000",
+      "kafka.ganglia.metrics.host": "localhost",
+      "zookeeper.session.timeout.ms": "6000",
+      "replica.lag.time.max.ms": "10000",
+      "num.io.threads": "8",
+      "kafka.ganglia.metrics.group": "kafka",
+      "replica.lag.max.messages": "4000",
+      "port": "6667",
+      "log.retention.bytes": "-1",
+      "fetch.purgatory.purge.interval.requests": "10000",
+      "producer.purgatory.purge.interval.requests": "10000",
+      "default.replication.factor": "1",
+      "replica.high.watermark.checkpoint.interval.ms": "5000",
+      "zookeeper.connect": "c6402.ambari.apache.org:2181",
+      "controlled.shutdown.retry.backoff.ms": "5000",
+      "num.partitions": "1",
+      "log.flush.interval.messages": "10000",
+      "replica.fetch.min.bytes": "1",
+      "queued.max.requests": "500",
+      "controlled.shutdown.max.retries": "3",
+      "replica.fetch.wait.max.ms": "500",
+      "controlled.shutdown.enable": "false",
+      "log.roll.hours": "168",
+      "log.cleanup.interval.mins": "10",
+      "replica.socket.receive.buffer.bytes": "65536",
+      "zookeeper.connection.timeout.ms": "6000",
+      "replica.fetch.max.bytes": "1048576",
+      "num.replica.fetchers": "1",
+      "socket.request.max.bytes": "104857600",
+      "message.max.bytes": "1000000",
+      "zookeeper.sync.time.ms": "2000",
+      "socket.receive.buffer.bytes": "102400",
+      "controller.message.queue.size": "10",
+      "log.flush.interval.ms": "3000",
+      "log.dirs": "/tmp/log/dir",
+      "controller.socket.timeout.ms": "30000",
+      "replica.socket.timeout.ms": "30000",
+      "auto.create.topics.enable": "true",
+      "log.index.size.max.bytes": "10485760",
+      "kafka.ganglia.metrics.port": "8649",
+      "log.index.interval.bytes": "4096",
+      "log.retention.hours": "168"
+    },
+    "application-properties": {
+      "atlas.cluster.name" : "c2",
+      "atlas.graph.storage.backend": "berkeleyje",
+      "atlas.graph.storage.directory": "data/berkley",
+      "atlas.graph.index.search.backend": "elasticsearch",
+      "atlas.graph.index.search.directory": "data/es",
+      "atlas.graph.index.search.elasticsearch.client-only": false,
+      "atlas.graph.index.search.elasticsearch.local-mode": true,
+      "atlas.lineage.hive.table.type.name": "Table",
+      "atlas.lineage.hive.column.type.name": "Column",
+      "atlas.lineage.hive.table.column.name": "columns",
+      "atlas.lineage.hive.process.type.name": "LoadProcess",
+      "atlas.lineage.hive.process.inputs.name": "inputTables",
+      "atlas.lineage.hive.process.outputs.name": "outputTables",
+      "atlas.enableTLS": false,
+      "atlas.authentication.method": "simple",
+      "atlas.authentication.principal": "atlas",
+      "atlas.authentication.keytab": "/etc/security/keytabs/atlas.service.keytab",
+      "atlas.http.authentication.enabled": false,
+      "atlas.http.authentication.type": "simple",
+      "atlas.http.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+      "atlas.http.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+      "atlas.http.authentication.kerberos.name.rules": "DEFAULT",
+      "atlas.server.http.port" : "21000",
+      "atlas.notification.embedded" : false,
+      "atlas.kafka.bootstrap.servers" : "c6401.ambari.apache.org:6667",
+      "atlas.kafka.data" : "/usr/hdp/current/atlas-server/data/kafka",
+      "atlas.kafka.entities.group.id" : "entities",
+      "atlas.kafka.hook.group.id" : "atlas",
+      "atlas.kafka.zookeeper.connect" : "c6401.ambari.apache.org:2181"
+    },
+    "atlas-env": {
+      "content": "# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java64_home}}\n# any additional java opts you want to set. This will apply to both client and server operations\nexport METADATA_OPTS={{metadata_opts}}\n# metadata configuration directory\nexport METADATA_CONF={{conf_dir}}\n# Where log files are stored. Defatult is logs directory under the base install location\nexport METADATA_LOG_DIR={{log_dir}}\n# additional classpath entries\nexport METADATACPPATH={{metadata_classpath}}\n# data dir\nexport METADATA_DATA_DIR={{data_dir}}\n# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.\nexport METADATA_EXPANDED_WEBAPP_DIR={{expanded_war_dir}}",
+      "metadata_user": "atlas",
+      "metadata_port": 21000,
+      "metadata_pid_dir": "/var/run/atlas",
+      "metadata_log_dir": "/var/log/atlas",
+      "metadata_data_dir": "/var/lib/atlas/data",
+      "metadata_expanded_war_dir": "/var/lib/atlas/server/webapp",
+      "metadata_conf_file": "atlas-application.properties"
+    },
+    "ranger-hbase-plugin-properties": {
+      "ranger-hbase-plugin-enabled":"yes"
+    },
+    "ranger-hive-plugin-properties": {
+      "ranger-hive-plugin-enabled":"yes"
+    },
+    "ranger-env": {
+      "xml_configurations_supported" : "true"
+    }
+  },
+  "configuration_attributes": {
+    "sqoop-site": {},
+    "yarn-site": {
+      "final": {
+        "yarn.nodemanager.disk-health-checker.min-healthy-disks": "true",
+        "yarn.nodemanager.container-executor.class": "true",
+        "yarn.nodemanager.local-dirs": "true"
+      }
+    },
+    "yarn-site": {
+      "final": {
+        "is_supported_yarn_ranger": "true"
+      }
+    },
+    "hdfs-site": {
+      "final": {
+        "dfs.web.ugi": "true",
+        "dfs.support.append": "true",
+        "dfs.cluster.administrators": "true"
+      }
+    },
+    "core-site": {
+      "final": {
+        "hadoop.proxyuser.hive.groups": "true",
+        "webinterface.private.actions": "true",
+        "hadoop.proxyuser.oozie.hosts": "true"
+      }
+    },
+    "knox-env": {},
+    "gateway-site": {},
+    "users-ldif": {},
+    "kafka-env": {},
+    "kafka-log4j": {},
+    "kafka-broker": {},
+    "metadata-env": {}
+  },
+  "configurationTags": {
+    "slider-client": {
+      "tag": "version1"
+    },
+    "slider-log4j": {
+      "tag": "version1"
+    },
+    "slider-env": {
+      "tag": "version1"
+    },
+    "core-site": {
+      "tag": "version1"
+    },
+    "hdfs-site": {
+      "tag": "version1"
+    },
+    "yarn-site": {
+      "tag": "version1"
+    },
+    "gateway-site": {
+      "tag": "version1"
+    },
+    "topology": {
+      "tag": "version1"
+    },
+    "users-ldif": {
+      "tag": "version1"
+    },
+    "kafka-env": {
+      "tag": "version1"
+    },
+    "kafka-log4j": {
+      "tag": "version1"
+    },
+    "kafka-broker": {
+      "tag": "version1"
+    },
+    "metadata-env": {
+      "tag": "version1"
+    }
+  },
+  "commandId": "7-1",
+  "clusterHostInfo": {
+    "ambari_server_host": [
+      "c6401.ambari.apache.org"
+    ],
+    "all_ping_ports": [
+      "8670",
+      "8670"
+    ],
+    "rm_host": [
+      "c6402.ambari.apache.org"
+    ],
+    "all_hosts": [
+      "c6401.ambari.apache.org",
+      "c6402.ambari.apache.org"
+    ],
+    "knox_gateway_hosts": [
+      "jaimin-knox-1.c.pramod-thangali.internal"
+    ],
+    "kafka_broker_hosts": [
+      "c6401.ambari.apache.org"
+    ],
+    "zookeeper_hosts": [
+      "c6401.ambari.apache.org"
+    ]
+
+  }
+}
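
This fixture is what the new test loads through config_file="default.json"; self.getConfig()['configurations'] in the test resolves to the "configurations" object above. A minimal sketch of reading it directly, assuming the working directory is the repository root:

import json

with open('ambari-server/src/test/python/stacks/2.5/configs/default.json') as f:
    config = json.load(f)

atlas_env = config['configurations']['atlas-env']
print(atlas_env['metadata_pid_dir'])  # /var/run/atlas
app_props = config['configurations']['application-properties']
print(app_props['atlas.server.http.port'])  # 21000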

http://git-wip-us.apache.org/repos/asf/ambari/blob/c5c5da75/ambari-server/src/test/python/stacks/2.6/ATLAS/test_atlas_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/ATLAS/test_atlas_server.py b/ambari-server/src/test/python/stacks/2.6/ATLAS/test_atlas_server.py
deleted file mode 100644
index 8e51ea0..0000000
--- a/ambari-server/src/test/python/stacks/2.6/ATLAS/test_atlas_server.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from stacks.utils.RMFTestCase import *
-
-from only_for_platform import not_for_platform, PLATFORM_WINDOWS
-
-@not_for_platform(PLATFORM_WINDOWS)
-class TestAtlasServer(RMFTestCase):
-  COMMON_SERVICES_PACKAGE_DIR = "ATLAS/0.1.0.2.3/package"
-  STACK_VERSION = "2.6"
-
-  def configureResourcesCalled(self):
-    self.assertResourceCalled('Directory', '/var/run/atlas',
-                              owner='atlas',
-                              group='hadoop',
-                              create_parents = True,
-                              cd_access='a',
-                              mode=0755
-                              )
-    self.assertResourceCalled('Directory', '/etc/atlas/conf',
-                              owner='atlas',
-                              group='hadoop',
-                              create_parents = True,
-                              cd_access='a',
-                              mode=0755
-                              )
-    self.assertResourceCalled('Directory', '/var/log/atlas',
-                              owner='atlas',
-                              group='hadoop',
-                              create_parents = True,
-                              cd_access='a',
-                              mode=0755
-                              )
-    self.assertResourceCalled('Directory', '/var/lib/atlas/data',
-                              owner='atlas',
-                              group='hadoop',
-                              create_parents = True,
-                              cd_access='a',
-                              mode=0644
-                              )
-    self.assertResourceCalled('Directory', '/var/lib/atlas/server/webapp',
-                              owner='atlas',
-                              group='hadoop',
-                              create_parents = True,
-                              cd_access='a',
-                              mode=0644
-                              )
-    self.assertResourceCalled('File', '/var/lib/atlas/server/webapp/atlas.war',
-                              content = StaticFile('/usr/hdp/current/atlas-server/server/webapp/atlas.war'),
-                              )
-    appprops = dict(
-        self.getConfig()['configurations']['application-properties'])
-    appprops['atlas.http.authentication.kerberos.name.rules'] = ' \\ \n'.join(appprops['atlas.http.authentication.kerberos.name.rules'].splitlines())
-    appprops['atlas.server.bind.address'] = 'c6401.ambari.apache.org'
-
-    self.assertResourceCalled('PropertiesFile',
-                              '/etc/atlas/conf/atlas-application.properties',
-                              properties=appprops,
-                              owner='atlas',
-                              group='hadoop',
-                              mode=0644,
-                              )
-    self.assertResourceCalled('File', '/etc/atlas/conf/atlas-env.sh',
-                              content=InlineTemplate(
-                                  self.getConfig()['configurations'][
-                                    'atlas-env']['content']),
-                              owner='atlas',
-                              group='hadoop',
-                              mode=0755,
-                              )
-    self.assertResourceCalled('File', '/etc/atlas/conf/atlas-log4j.xml',
-                              content=StaticFile('atlas-log4j.xml'),
-                              owner='atlas',
-                              group='hadoop',
-                              mode=0644,
-                              )
-
-  def test_configure_default(self):
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
-                       classname = "MetadataServer",
-                       command = "configure",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-                       )
-
-    self.configureResourcesCalled()
-    self.assertNoMoreResources()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/c5c5da75/ambari-server/src/test/python/stacks/2.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/default.json b/ambari-server/src/test/python/stacks/2.6/configs/default.json
deleted file mode 100644
index 2e1bc68..0000000
--- a/ambari-server/src/test/python/stacks/2.6/configs/default.json
+++ /dev/null
@@ -1,329 +0,0 @@
-{
-  "roleCommand": "SERVICE_CHECK",
-  "clusterName": "c1",
-  "hostname": "c6401.ambari.apache.org",
-  "hostLevelParams": {
-    "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
-    "agent_stack_retry_count": "5",
-    "agent_stack_retry_on_unavailability": "false",
-    "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
-    "ambari_db_rca_password": "mapred",
-    "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
-    "jce_name": "UnlimitedJCEPolicyJDK7.zip",
-    "stack_version": "2.3",
-    "stack_name": "HDP",
-    "ambari_db_rca_driver": "org.postgresql.Driver",
-    "jdk_name": "jdk-7u67-linux-x64.tar.gz",
-    "ambari_db_rca_username": "mapred",
-    "java_home": "/usr/jdk64/jdk1.7.0_45",
-    "db_name": "ambari"
-  },
-  "commandType": "EXECUTION_COMMAND",
-  "roleParams": {},
-  "serviceName": "SLIDER",
-  "role": "SLIDER",
-  "commandParams": {
-    "version": "2.2.1.0-2067",
-    "command_timeout": "300",
-    "service_package_folder": "OOZIE",
-    "script_type": "PYTHON",
-    "script": "scripts/service_check.py",
-    "excluded_hosts": "host1,host2"
-  },
-  "taskId": 152,
-  "public_hostname": "c6401.ambari.apache.org",
-  "configurations": {
-    "slider-client": {
-      "slider.yarn.queue": "default"
-    },
-    "sqoop-site": {
-      "atlas.cluster.name": "c1",
-      "sqoop.job.data.publish.class": "org.apache.atlas.sqoop.hook.SqoopHook"
-    },
-    "mahout-env": {
-      "mahout_user": "mahout"
-    },
-    "yarn-env": {
-      "yarn_user": "yarn"
-    },
-    "mahout-log4j": {
-      "content": "\n            #\n            #\n            # Licensed to the Apache Software Foundation (ASF) under one\n            # or more contributor license agreements.  See the NOTICE file\n            # distributed with this work for additional information\n            # regarding copyright ownership.  The ASF licenses this file\n            # to you under the Apache License, Version 2.0 (the\n            # \"License\"); you may not use this file except in compliance\n            # with the License.  You may obtain a copy of the License at\n            #\n            #   http://www.apache.org/licenses/LICENSE-2.0\n            #\n            # Unless required by applicable law or agreed to in writing,\n            # software distributed under the License is distributed on an\n            # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n            # KIND, either express or implied.  See the License for the\n            # specific language governing permissions a
 nd limitations\n            # under the License.\n            #\n            #\n            #\n\n            # Set everything to be logged to the console\n            log4j.rootCategory=WARN, console\n            log4j.appender.console=org.apache.log4j.ConsoleAppender\n            log4j.appender.console.target=System.err\n            log4j.appender.console.layout=org.apache.log4j.PatternLayout\n            log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n            # Settings to quiet third party logs that are too verbose\n            log4j.logger.org.eclipse.jetty=WARN\n            log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=WARN\n            log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=WARN"
-    },
-    "hadoop-env": {
-      "hdfs_user": "hdfs"
-    },
-    "core-site": {
-      "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
-    },
-    "hdfs-site": {
-      "a": "b"
-    },
-    "yarn-site": {
-      "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
-      "yarn.resourcemanager.address": "c6401.ambari.apache.org:8050",
-      "yarn.resourcemanager.scheduler.address": "c6401.ambari.apache.org:8030"
-    },
-    "cluster-env": {
-      "managed_hdfs_resource_property_names": "",
-      "security_enabled": "false",
-      "ignore_groupsusers_create": "false",
-      "smokeuser": "ambari-qa",
-      "kerberos_domain": "EXAMPLE.COM",
-      "user_group": "hadoop"
-    },
-    "webhcat-site": {
-      "templeton.jar": "/usr/hdp/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar",
-      "templeton.pig.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/pig/pig.tar.gz",
-      "templeton.hive.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/hive/hive.tar.gz",
-      "templeton.sqoop.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/sqoop/sqoop.tar.gz",
-      "templeton.streaming.jar": "hdfs:///hdp/apps/{{ hdp_stack_version }}/mr/hadoop-streaming.jar"
-    },
-    "slider-log4j": {
-      "content": "log4jproperties\nline2"
-    },
-    "slider-env": {
-      "content": "envproperties\nline2"
-    },
-    "gateway-site": {
-      "java.security.auth.login.config": "/etc/knox/conf/krb5JAASLogin.conf",
-      "gateway.hadoop.kerberos.secured": "false",
-      "gateway.gateway.conf.dir": "deployments",
-      "gateway.path": "gateway",
-      "sun.security.krb5.debug": "true",
-      "java.security.krb5.conf": "/etc/knox/conf/krb5.conf",
-      "gateway.port": "8443"
-    },
-
-    "users-ldif": {
-      "content": "\n            # Licensed to the Apache Software Foundation (ASF) under one\n            # or more contributor license agreements.  See the NOTICE file\n            # distributed with this work for additional information\n            # regarding copyright ownership.  The ASF licenses this file\n            # to you under the Apache License, Version 2.0 (the\n            # \"License\"); you may not use this file except in compliance\n            # with the License.  You may obtain a copy of the License at\n            #\n            #     http://www.apache.org/licenses/LICENSE-2.0\n            #\n            # Unless required by applicable law or agreed to in writing, software\n            # distributed under the License is distributed on an \"AS IS\" BASIS,\n            # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n            # See the License for the specific language governing permissions and\n            # limitations under the Lice
 nse.\n\n            version: 1\n\n            # Please replace with site specific values\n            dn: dc=hadoop,dc=apache,dc=org\n            objectclass: organization\n            objectclass: dcObject\n            o: Hadoop\n            dc: hadoop\n\n            # Entry for a sample people container\n            # Please replace with site specific values\n            dn: ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:organizationalUnit\n            ou: people\n\n            # Entry for a sample end user\n            # Please replace with site specific values\n            dn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: Guest\n            sn: User\n            uid: guest\n            userPassword:guest-password\n\n            # entry for sample user admin\n            dn: 
 uid=admin,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: Admin\n            sn: Admin\n            uid: admin\n            userPassword:admin-password\n\n            # entry for sample user sam\n            dn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: sam\n            sn: sam\n            uid: sam\n            userPassword:sam-password\n\n            # entry for sample user tom\n            dn: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: tom\n            sn: tom\n            uid: tom\n            userPasswor
 d:tom-password\n\n            # create FIRST Level groups branch\n            dn: ou=groups,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:organizationalUnit\n            ou: groups\n            description: generic groups branch\n\n            # create the analyst group under groups\n            dn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass: groupofnames\n            cn: analyst\n            description:analyst  group\n            member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n            member: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n\n\n            # create the scientist group under groups\n            dn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass: groupofnames\n            cn: scientist\n            description: scientist group\n            member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org"
-    },
-
-    "topology": {
-      "content": "\n        <topology>\n\n            <gateway>\n\n                <provider>\n                    <role>authentication</role>\n                    <name>ShiroProvider</name>\n                    <enabled>true</enabled>\n                    <param>\n                        <name>sessionTimeout</name>\n                        <value>30</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm</name>\n                        <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm.userDnTemplate</name>\n                        <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm.contextFactory.url</name>\n                        <value>ldap://{{knox_host_name}}:33389</value>\n                 
    </param>\n                    <param>\n                        <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n                        <value>simple</value>\n                    </param>\n                    <param>\n                        <name>urls./**</name>\n                        <value>authcBasic</value>\n                    </param>\n                </provider>\n\n                <provider>\n                    <role>identity-assertion</role>\n                    <name>Default</name>\n                    <enabled>true</enabled>\n                </provider>\n\n            </gateway>\n\n            <service>\n                <role>NAMENODE</role>\n                <url>hdfs://{{namenode_host}}:{{namenode_rpc_port}}</url>\n            </service>\n\n            <service>\n                <role>JOBTRACKER</role>\n                <url>rpc://{{rm_host}}:{{jt_rpc_port}}</url>\n            </service>\n\n            <service>\n                <role>WEBHDFS</role
 >\n                <url>http://{{namenode_host}}:{{namenode_http_port}}/webhdfs</url>\n            </service>\n\n            <service>\n                <role>WEBHCAT</role>\n                <url>http://{{webhcat_server_host}}:{{templeton_port}}/templeton</url>\n            </service>\n\n            <service>\n                <role>OOZIE</role>\n                <url>http://{{oozie_server_host}}:{{oozie_server_port}}/oozie</url>\n            </service>\n\n            <service>\n                <role>WEBHBASE</role>\n                <url>http://{{hbase_master_host}}:{{hbase_master_port}}</url>\n            </service>\n\n            <service>\n                <role>HIVE</role>\n                <url>http://{{hive_server_host}}:{{hive_http_port}}/{{hive_http_path}}</url>\n            </service>\n\n            <service>\n                <role>RESOURCEMANAGER</role>\n                <url>http://{{rm_host}}:{{rm_port}}/ws</url>\n            </service>\n        </topology>"
-    },
-
-    "ldap-log4j": {
-      "content": "\n        # Licensed to the Apache Software Foundation (ASF) under one\n        # or more contributor license agreements.  See the NOTICE file\n        # distributed with this work for additional information\n        # regarding copyright ownership.  The ASF licenses this file\n        # to you under the Apache License, Version 2.0 (the\n        # \"License\"); you may not use this file except in compliance\n        # with the License.  You may obtain a copy of the License at\n        #\n        #     http://www.apache.org/licenses/LICENSE-2.0\n        #\n        # Unless required by applicable law or agreed to in writing, software\n        # distributed under the License is distributed on an \"AS IS\" BASIS,\n        # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n        # See the License for the specific language governing permissions and\n        # limitations under the License.\n        #testing\n\n        app.log.dir=${launcher.dir
 }/../logs\n        app.log.file=${launcher.name}.log\n\n        log4j.rootLogger=ERROR, drfa\n        log4j.logger.org.apache.directory.server.ldap.LdapServer=INFO\n        log4j.logger.org.apache.directory=WARN\n\n        log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n        log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n        log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n        log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n        log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n        log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n        log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n        log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n"
-    },
-
-    "gateway-log4j": {
-      "content": "\n\n      # Licensed to the Apache Software Foundation (ASF) under one\n      # or more contributor license agreements. See the NOTICE file\n      # distributed with this work for additional information\n      # regarding copyright ownership. The ASF licenses this file\n      # to you under the Apache License, Version 2.0 (the\n      # \"License\"); you may not use this file except in compliance\n      # with the License. You may obtain a copy of the License at\n      #\n      # http://www.apache.org/licenses/LICENSE-2.0\n      #\n      # Unless required by applicable law or agreed to in writing, software\n      # distributed under the License is distributed on an \"AS IS\" BASIS,\n      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n      # See the License for the specific language governing permissions and\n      # limitations under the License.\n\n      app.log.dir=${launcher.dir}/../logs\n      app.log.file=${launcher.name}.log\n   
    app.audit.file=${launcher.name}-audit.log\n\n      log4j.rootLogger=ERROR, drfa\n\n      log4j.logger.org.apache.hadoop.gateway=INFO\n      #log4j.logger.org.apache.hadoop.gateway=DEBUG\n\n      #log4j.logger.org.eclipse.jetty=DEBUG\n      #log4j.logger.org.apache.shiro=DEBUG\n      #log4j.logger.org.apache.http=DEBUG\n      #log4j.logger.org.apache.http.client=DEBUG\n      #log4j.logger.org.apache.http.headers=DEBUG\n      #log4j.logger.org.apache.http.wire=DEBUG\n\n      log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n      log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n      log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n      log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n      log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n      log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n      log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n      log4j.appender.drfa.layout.ConversionPattern=%
 d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n      log4j.logger.audit=INFO, auditfile\n      log4j.appender.auditfile=org.apache.log4j.DailyRollingFileAppender\n      log4j.appender.auditfile.File=${app.log.dir}/${app.audit.file}\n      log4j.appender.auditfile.Append = true\n      log4j.appender.auditfile.DatePattern = '.'yyyy-MM-dd\n      log4j.appender.auditfile.layout = org.apache.hadoop.gateway.audit.log4j.layout.AuditLayout"
-    },
-    "knox-env": {
-      "knox_master_secret": "sa",
-      "knox_group": "knox",
-      "knox_pid_dir": "/var/run/knox",
-      "knox_user": "knox"
-    },
-    "kafka-env": {
-      "content": "\n#!/bin/bash\n\n# Set KAFKA specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\nexport PATH=$PATH:$JAVA_HOME/bin",
-      "kafka_user": "kafka",
-      "kafka_log_dir": "/var/log/kafka",
-      "kafka_pid_dir": "/var/run/kafka"
-    },
-    "kafka-log4j": {
-      "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\nkafka.logs.dir=logs\n\nlog4j.rootLogger=INFO, stdout\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org.apache.log4j
 .PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log\nlog4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log\nlog4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-reques
 t.log\nlog4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log\nlog4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log\nlog4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\n# Turn on all our debugging info\n#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender\n#log4j.logge
 r.kafka.client.ClientUtils=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender\n#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG\nlog4j.logger.kafka=INFO, kafkaAppender\nlog4j.logger.kafka.network.RequestChannel$=WARN, requestAppender\nlog4j.additivity.kafka.network.RequestChannel$=false\n\n#log4j.logger.kafka.network.Processor=TRACE, requestAppender\n#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender\n#log4j.additivity.kafka.server.KafkaApis=false\nlog4j.logger.kafka.request.logger=WARN, requestAppender\nlog4j.additivity.kafka.request.logger=false\n\nlog4j.logger.kafka.controller=TRACE, controllerAppender\nlog4j.additivity.kafka.controller=false\n\nlog4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender\nlog4j.additivity.kafka.log.LogCleaner=false\n\nlog4j.logger.state.change.logger=TRACE, stateChangeAppender\nlog4j.additivity.state.change.logger=false"
-    },
-    "kafka-broker": {
-      "log.segment.bytes": "1073741824",
-      "socket.send.buffer.bytes": "102400",
-      "num.network.threads": "3",
-      "log.flush.scheduler.interval.ms": "3000",
-      "kafka.ganglia.metrics.host": "localhost",
-      "zookeeper.session.timeout.ms": "6000",
-      "replica.lag.time.max.ms": "10000",
-      "num.io.threads": "8",
-      "kafka.ganglia.metrics.group": "kafka",
-      "replica.lag.max.messages": "4000",
-      "port": "6667",
-      "log.retention.bytes": "-1",
-      "fetch.purgatory.purge.interval.requests": "10000",
-      "producer.purgatory.purge.interval.requests": "10000",
-      "default.replication.factor": "1",
-      "replica.high.watermark.checkpoint.interval.ms": "5000",
-      "zookeeper.connect": "c6402.ambari.apache.org:2181",
-      "controlled.shutdown.retry.backoff.ms": "5000",
-      "num.partitions": "1",
-      "log.flush.interval.messages": "10000",
-      "replica.fetch.min.bytes": "1",
-      "queued.max.requests": "500",
-      "controlled.shutdown.max.retries": "3",
-      "replica.fetch.wait.max.ms": "500",
-      "controlled.shutdown.enable": "false",
-      "log.roll.hours": "168",
-      "log.cleanup.interval.mins": "10",
-      "replica.socket.receive.buffer.bytes": "65536",
-      "zookeeper.connection.timeout.ms": "6000",
-      "replica.fetch.max.bytes": "1048576",
-      "num.replica.fetchers": "1",
-      "socket.request.max.bytes": "104857600",
-      "message.max.bytes": "1000000",
-      "zookeeper.sync.time.ms": "2000",
-      "socket.receive.buffer.bytes": "102400",
-      "controller.message.queue.size": "10",
-      "log.flush.interval.ms": "3000",
-      "log.dirs": "/tmp/log/dir",
-      "controller.socket.timeout.ms": "30000",
-      "replica.socket.timeout.ms": "30000",
-      "auto.create.topics.enable": "true",
-      "log.index.size.max.bytes": "10485760",
-      "kafka.ganglia.metrics.port": "8649",
-      "log.index.interval.bytes": "4096",
-      "log.retention.hours": "168"
-    },
-    "application-properties": {
-      "atlas.cluster.name" : "c2",
-      "atlas.graph.storage.backend": "berkeleyje",
-      "atlas.graph.storage.directory": "data/berkley",
-      "atlas.graph.index.search.backend": "elasticsearch",
-      "atlas.graph.index.search.directory": "data/es",
-      "atlas.graph.index.search.elasticsearch.client-only": false,
-      "atlas.graph.index.search.elasticsearch.local-mode": true,
-      "atlas.lineage.hive.table.type.name": "Table",
-      "atlas.lineage.hive.column.type.name": "Column",
-      "atlas.lineage.hive.table.column.name": "columns",
-      "atlas.lineage.hive.process.type.name": "LoadProcess",
-      "atlas.lineage.hive.process.inputs.name": "inputTables",
-      "atlas.lineage.hive.process.outputs.name": "outputTables",
-      "atlas.enableTLS": false,
-      "atlas.authentication.method": "simple",
-      "atlas.authentication.principal": "atlas",
-      "atlas.authentication.keytab": "/etc/security/keytabs/atlas.service.keytab",
-      "atlas.http.authentication.enabled": false,
-      "atlas.http.authentication.type": "simple",
-      "atlas.http.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
-      "atlas.http.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
-      "atlas.http.authentication.kerberos.name.rules": "DEFAULT",
-      "atlas.server.http.port" : "21000",
-      "atlas.notification.embedded" : false,
-      "atlas.kafka.bootstrap.servers" : "c6401.ambari.apache.org:6667",
-      "atlas.kafka.data" : "/usr/hdp/current/atlas-server/data/kafka",
-      "atlas.kafka.entities.group.id" : "entities",
-      "atlas.kafka.hook.group.id" : "atlas",
-      "atlas.kafka.zookeeper.connect" : "c6401.ambari.apache.org:2181"
-    },
-    "atlas-env": {
-      "content": "# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java64_home}}\n# any additional java opts you want to set. This will apply to both client and server operations\nexport METADATA_OPTS={{metadata_opts}}\n# metadata configuration directory\nexport METADATA_CONF={{conf_dir}}\n# Where log files are stored. Defatult is logs directory under the base install location\nexport METADATA_LOG_DIR={{log_dir}}\n# additional classpath entries\nexport METADATACPPATH={{metadata_classpath}}\n# data dir\nexport METADATA_DATA_DIR={{data_dir}}\n# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.\nexport METADATA_EXPANDED_WEBAPP_DIR={{expanded_war_dir}}",
-      "metadata_user": "atlas",
-      "metadata_port": 21000,
-      "metadata_pid_dir": "/var/run/atlas",
-      "metadata_log_dir": "/var/log/atlas",
-      "metadata_data_dir": "/var/lib/atlas/data",
-      "metadata_expanded_war_dir": "/var/lib/atlas/server/webapp",
-      "metadata_conf_file": "atlas-application.properties"
-    },
-    "ranger-hbase-plugin-properties": {
-      "ranger-hbase-plugin-enabled":"yes"
-    },
-    "ranger-hive-plugin-properties": {
-      "ranger-hive-plugin-enabled":"yes"
-    },
-    "ranger-env": {
-      "xml_configurations_supported" : "true"
-    }
-  },
-  "configuration_attributes": {
-    "sqoop-site": {},
-    "yarn-site": {
-      "final": {
-        "yarn.nodemanager.disk-health-checker.min-healthy-disks": "true",
-        "yarn.nodemanager.container-executor.class": "true",
-        "yarn.nodemanager.local-dirs": "true"
-      }
-    },
-    "yarn-site": {
-      "final": {
-        "is_supported_yarn_ranger": "true"
-      }
-    },
-    "hdfs-site": {
-      "final": {
-        "dfs.web.ugi": "true",
-        "dfs.support.append": "true",
-        "dfs.cluster.administrators": "true"
-      }
-    },
-    "core-site": {
-      "final": {
-        "hadoop.proxyuser.hive.groups": "true",
-        "webinterface.private.actions": "true",
-        "hadoop.proxyuser.oozie.hosts": "true"
-      }
-    },
-    "knox-env": {},
-    "gateway-site": {},
-    "users-ldif": {},
-    "kafka-env": {},
-    "kafka-log4j": {},
-    "kafka-broker": {},
-    "metadata-env": {}
-  },
-  "configurationTags": {
-    "slider-client": {
-      "tag": "version1"
-    },
-    "slider-log4j": {
-      "tag": "version1"
-    },
-    "slider-env": {
-      "tag": "version1"
-    },
-    "core-site": {
-      "tag": "version1"
-    },
-    "hdfs-site": {
-      "tag": "version1"
-    },
-    "yarn-site": {
-      "tag": "version1"
-    },
-    "gateway-site": {
-      "tag": "version1"
-    },
-    "topology": {
-      "tag": "version1"
-    },
-    "users-ldif": {
-      "tag": "version1"
-    },
-    "kafka-env": {
-      "tag": "version1"
-    },
-    "kafka-log4j": {
-      "tag": "version1"
-    },
-    "kafka-broker": {
-      "tag": "version1"
-    },
-    "metadata-env": {
-      "tag": "version1"
-    }
-  },
-  "commandId": "7-1",
-  "clusterHostInfo": {
-    "ambari_server_host": [
-      "c6401.ambari.apache.org"
-    ],
-    "all_ping_ports": [
-      "8670",
-      "8670"
-    ],
-    "rm_host": [
-      "c6402.ambari.apache.org"
-    ],
-    "all_hosts": [
-      "c6401.ambari.apache.org",
-      "c6402.ambari.apache.org"
-    ],
-    "knox_gateway_hosts": [
-      "jaimin-knox-1.c.pramod-thangali.internal"
-    ],
-    "kafka_broker_hosts": [
-      "c6401.ambari.apache.org"
-    ],
-    "zookeeper_hosts": [
-      "c6401.ambari.apache.org"
-    ]
-
-  }
-}
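

For readers tracing how the relocated fixture is consumed, here is a minimal
sketch (plain Python, not part of this commit; the fixture path and key names
are taken from the diff above, and the script assumes it runs from the root of
a local ambari checkout) that loads the 2.5 default.json and reads the renamed
atlas-env values the Atlas scripts rely on:

    import json

    # Path of the relocated test fixture, per the diff above; adjust to
    # your local checkout.
    FIXTURE = "ambari-server/src/test/python/stacks/2.5/configs/default.json"

    with open(FIXTURE) as f:
        command = json.load(f)

    # The renamed Atlas settings live under the "atlas-env" config type,
    # which this commit moves from the 2.6 to the 2.5 stack definition.
    atlas_env = command["configurations"]["atlas-env"]
    print(atlas_env["metadata_log_dir"])   # /var/log/atlas
    print(atlas_env["metadata_pid_dir"])   # /var/run/atlas
    print(atlas_env["metadata_port"])      # 21000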