Posted to commits@ambari.apache.org by vb...@apache.org on 2017/05/08 20:46:16 UTC

[2/3] ambari git commit: AMBARI-20954. HDP 3.0 TP - create service definition for Atlas with configs, kerberos, widgets, etc. (vbrodetskyi)

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/metainfo.xml
new file mode 100644
index 0000000..11ebf45
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/metainfo.xml
@@ -0,0 +1,190 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ATLAS</name>
+      <displayName>Atlas</displayName>
+      <comment>Atlas Metadata and Governance platform</comment>
+      <version>0.7.0.3.0</version>
+      
+      <components>
+        <component>
+          <name>ATLAS_SERVER</name>
+          <displayName>Atlas Metadata Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
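+            <!-- scope "host" requires the dependency to be installed on the
+                 same host as this component; scope "cluster" only requires it
+                 somewhere in the cluster. auto-deploy adds it when missing. -->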
+            <dependency>
+              <name>AMBARI_INFRA/INFRA_SOLR_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>HBASE/HBASE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>KAFKA/KAFKA_BROKER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/metadata_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>atlas_app</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>ATLAS_CLIENT</name>
+          <displayName>Atlas Metadata Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+          </dependencies>
+          <commandScript>
+            <script>scripts/atlas_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>properties</type>
+              <fileName>application.properties</fileName>
+              <dictionaryName>application-properties</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>atlas-env.sh</fileName>
+              <dictionaryName>atlas-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>atlas-log4j.xml</fileName>
+              <dictionaryName>atlas-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>atlas-solrconfig.xml</fileName>
+              <dictionaryName>atlas-solrconfig</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <requiredServices>
+        <service>KAFKA</service>
+      </requiredServices>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+        <theme>
+          <fileName>theme_version_2.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
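+            <!-- ${stack_version} is substituted with the underscore-delimited
+                 stack version at package installation time -->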
+            <package>
+              <name>atlas-metadata_${stack_version}</name>
+            </package>
+            <package>
+              <name>ambari-infra-solr-client</name>
+              <condition>should_install_infra_solr_client</condition>
+            </package>
+            <package>
+              <name>kafka_${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>atlas-metadata-${stack_version}</name>
+            </package>
+            <package>
+              <name>ambari-infra-solr-client</name>
+              <condition>should_install_infra_solr_client</condition>
+            </package>
+            <package>
+              <name>kafka-${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>application-properties</config-type>
+        <config-type>atlas-env</config-type>
+        <config-type>atlas-log4j</config-type>
+        <config-type>core-site</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>atlas-solrconfig</config-type>
+        <config-type>ranger-atlas-audit</config-type>
+        <config-type>ranger-atlas-plugin-properties</config-type>
+        <config-type>ranger-atlas-policymgr-ssl</config-type>
+        <config-type>ranger-atlas-security</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/atlas_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/atlas_client.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/atlas_client.py
new file mode 100644
index 0000000..26742ae
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/atlas_client.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+from metadata import metadata
+
+
+class AtlasClient(Script):
+
+  def get_component_name(self):
+    return "atlas-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, params.version):
+      conf_select.select(params.stack_name, "atlas", params.version)
+      stack_select.select("atlas-client", params.version)
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env, upgrade_type=None, config_dir=None):
+    import params
+    env.set_params(params)
+    metadata('client')
+
+  def status(self, env):
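+    # client components have no daemon process, so there is no status to report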
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  AtlasClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata.py
new file mode 100644
index 0000000..36c4598
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import hashlib
+
+from resource_management import Package
+from resource_management import StackFeature
+from resource_management.core.resources.system import Directory, File, Execute
+from resource_management.core.source import StaticFile, InlineTemplate, Template
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.decorator import retry
+from resource_management.libraries.functions import solr_cloud_util
+from resource_management.libraries.functions.stack_features import check_stack_feature, get_stack_feature_version
+from resource_management.libraries.resources.properties_file import PropertiesFile
+from resource_management.libraries.resources.template_config import TemplateConfig
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.resources.modify_properties_file import ModifyPropertiesFile
+
+
+def metadata(type='server'):
+    import params
+
+    # Needed by both Server and Client
+    Directory(params.conf_dir,
+              mode=0755,
+              cd_access='a',
+              owner=params.metadata_user,
+              group=params.user_group,
+              create_parents = True
+    )
+
+    if type == "server":
+      Directory([params.pid_dir],
+                mode=0755,
+                cd_access='a',
+                owner=params.metadata_user,
+                group=params.user_group,
+                create_parents = True
+      )
+      Directory(format('{conf_dir}/solr'),
+                mode=0755,
+                cd_access='a',
+                owner=params.metadata_user,
+                group=params.user_group,
+                create_parents = True,
+                recursive_ownership=True
+      )
+      Directory(params.log_dir,
+                mode=0755,
+                cd_access='a',
+                owner=params.metadata_user,
+                group=params.user_group,
+                create_parents = True
+      )
+      Directory(params.data_dir,
+                mode=0644,
+                cd_access='a',
+                owner=params.metadata_user,
+                group=params.user_group,
+                create_parents = True
+      )
+      Directory(params.expanded_war_dir,
+                mode=0644,
+                cd_access='a',
+                owner=params.metadata_user,
+                group=params.user_group,
+                create_parents = True
+      )
+      File(format("{expanded_war_dir}/atlas.war"),
+           content = StaticFile(format('{metadata_home}/server/webapp/atlas.war'))
+      )
+      File(format("{conf_dir}/atlas-log4j.xml"),
+           mode=0644,
+           owner=params.metadata_user,
+           group=params.user_group,
+           content=InlineTemplate(params.metadata_log4j_content)
+      )
+      File(format("{conf_dir}/atlas-env.sh"),
+           owner=params.metadata_user,
+           group=params.user_group,
+           mode=0755,
+           content=InlineTemplate(params.metadata_env_content)
+      )
+
+      if not is_empty(params.atlas_admin_username) and not is_empty(params.atlas_admin_password):
+        psswd_output = hashlib.sha256(params.atlas_admin_password).hexdigest()
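+        # writes an entry of the form <username>=ROLE_ADMIN::<sha256 hex digest>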
+        ModifyPropertiesFile(format("{conf_dir}/users-credentials.properties"),
+            properties = {format('{atlas_admin_username}') : format('ROLE_ADMIN::{psswd_output}')},
+            owner = params.metadata_user
+        )
+
+      files_to_chown = [format("{conf_dir}/policy-store.txt"), format("{conf_dir}/users-credentials.properties")]
+      for file_path in files_to_chown:
+        if os.path.exists(file_path):
+          Execute(('chown', format('{metadata_user}:{user_group}'), file_path),
+                  sudo=True
+                  )
+          Execute(('chmod', '644', file_path),
+                  sudo=True
+                  )
+
+      if params.metadata_solrconfig_content:
+        File(format("{conf_dir}/solr/solrconfig.xml"),
+             mode=0644,
+             owner=params.metadata_user,
+             group=params.user_group,
+             content=InlineTemplate(params.metadata_solrconfig_content)
+        )
+
+    # Needed by both Server and Client
+    PropertiesFile(format('{conf_dir}/{conf_file}'),
+         properties = params.application_properties,
+         mode=0644,
+         owner=params.metadata_user,
+         group=params.user_group
+    )
+
+    if params.security_enabled:
+      TemplateConfig(format(params.atlas_jaas_file),
+                     owner=params.metadata_user)
+
+    if type == 'server' and params.search_backend_solr and params.has_infra_solr:
+      solr_cloud_util.setup_solr_client(params.config)
+      check_znode()
+      jaasFile=params.atlas_jaas_file if params.security_enabled else None
+      upload_conf_set('atlas_configs', jaasFile)
+
+      if params.security_enabled: # update permissions before creating the collections
+        solr_cloud_util.add_solr_roles(params.config,
+                                       roles = [params.infra_solr_role_atlas, params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
+                                       new_service_principals = [params.atlas_jaas_principal])
+
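+      # Atlas keeps its graph search indexes in these three Solr collections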
+      create_collection('vertex_index', 'atlas_configs', jaasFile)
+      create_collection('edge_index', 'atlas_configs', jaasFile)
+      create_collection('fulltext_index', 'atlas_configs', jaasFile)
+
+      if params.security_enabled:
+        secure_znode(format('{infra_solr_znode}/configs/atlas_configs'), jaasFile)
+        secure_znode(format('{infra_solr_znode}/collections/vertex_index'), jaasFile)
+        secure_znode(format('{infra_solr_znode}/collections/edge_index'), jaasFile)
+        secure_znode(format('{infra_solr_znode}/collections/fulltext_index'), jaasFile)
+
+    File(params.atlas_hbase_setup,
+         group=params.user_group,
+         owner=params.hbase_user,
+         content=Template("atlas_hbase_setup.rb.j2")
+    )
+
+    is_atlas_upgrade_support = check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, get_stack_feature_version(params.config))
+
+    if is_atlas_upgrade_support and params.security_enabled:
+
+      File(params.atlas_kafka_setup,
+           group=params.user_group,
+           owner=params.kafka_user,
+           content=Template("atlas_kafka_acl.sh.j2"))
+
+      # these files are required only when no Kafka broker is configured on this host
+      if not params.host_with_kafka:
+        File(format("{kafka_conf_dir}/kafka-env.sh"),
+             owner=params.kafka_user,
+             content=InlineTemplate(params.kafka_env_sh_template))
+
+        File(format("{kafka_conf_dir}/kafka_jaas.conf"),
+             group=params.user_group,
+             owner=params.kafka_user,
+             content=Template("kafka_jaas.conf.j2"))
+
+    if params.stack_supports_atlas_hdfs_site_on_namenode_ha and len(params.namenode_host) > 1:
+      XmlConfig("hdfs-site.xml",
+                conf_dir=params.conf_dir,
+                configurations=params.config['configurations']['hdfs-site'],
+                configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+                owner=params.metadata_user,
+                group=params.user_group,
+                mode=0644
+                )
+    else:
+      File(format('{conf_dir}/hdfs-site.xml'), action="delete")
+
+
+def upload_conf_set(config_set, jaasFile):
+  import params
+
+  solr_cloud_util.upload_configuration_to_zk(
+      zookeeper_quorum=params.zookeeper_quorum,
+      solr_znode=params.infra_solr_znode,
+      config_set_dir=format("{conf_dir}/solr"),
+      config_set=config_set,
+      tmp_dir=params.tmp_dir,
+      java64_home=params.java64_home,
+      solrconfig_content=InlineTemplate(params.metadata_solrconfig_content),
+      jaas_file=jaasFile,
+      retry=30, interval=5)
+
+def create_collection(collection, config_set, jaasFile):
+  import params
+
+  solr_cloud_util.create_collection(
+      zookeeper_quorum=params.zookeeper_quorum,
+      solr_znode=params.infra_solr_znode,
+      collection = collection,
+      config_set=config_set,
+      java64_home=params.java64_home,
+      jaas_file=jaasFile,
+      shards=params.atlas_solr_shards,
+      replication_factor = params.infra_solr_replication_factor)
+
+def secure_znode(znode, jaasFile):
+  import params
+  solr_cloud_util.secure_znode(config=params.config, zookeeper_quorum=params.zookeeper_quorum,
+                               solr_znode=znode,
+                               jaas_file=jaasFile,
+                               java64_home=params.java64_home, sasl_users=[params.atlas_jaas_principal])
+
+
+
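+# retried because the znode may not exist yet while Infra Solr is still starting up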
+@retry(times=10, sleep_time=5, err_class=Fail)
+def check_znode():
+  import params
+  solr_cloud_util.check_znode(
+    zookeeper_quorum=params.zookeeper_quorum,
+    solr_znode=params.infra_solr_znode,
+    java64_home=params.java64_home)

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata_server.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata_server.py
new file mode 100644
index 0000000..1ef77cf
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata_server.py
@@ -0,0 +1,187 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+# Python Imports
+import os
+
+# Local Imports
+from metadata import metadata
+from resource_management import Fail
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.core.resources.system import Execute, File
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_PROPERTIES
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.libraries.functions.stack_features import check_stack_feature, get_stack_feature_version
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.core.resources.system import Directory
+from resource_management.core.logger import Logger
+from setup_ranger_atlas import setup_ranger_atlas
+from resource_management.core.resources.zkmigrator import ZkMigrator
+
+class MetadataServer(Script):
+
+  def get_component_name(self):
+    return "atlas-server"
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+
+    Directory(format("{expanded_war_dir}/atlas"),
+              action = "delete",
+    )
+
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None, config_dir=None):
+    import params
+    env.set_params(params)
+    metadata()
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, params.version):
+      conf_select.select(params.stack_name, "atlas", params.version)
+      stack_select.select("atlas-server", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+
+    daemon_cmd = format('source {params.conf_dir}/atlas-env.sh ; {params.metadata_start_script}')
+    no_op_test = format('ls {params.pid_file} >/dev/null 2>&1 && ps -p `cat {params.pid_file}` >/dev/null 2>&1')
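+    # no_op_test makes the start a no-op when the pid file already points at a live process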
+    atlas_hbase_setup_command = format("cat {atlas_hbase_setup} | hbase shell -n")
+    atlas_kafka_setup_command = format("bash {atlas_kafka_setup}")
+    secure_atlas_hbase_setup_command = format("kinit -kt {hbase_user_keytab} {hbase_principal_name}; ") + atlas_hbase_setup_command
+    # if the principal is distributed across several hosts, the _HOST pattern needs to be replaced with this host's name
+    secure_atlas_kafka_setup_command = format("kinit -kt {kafka_keytab} {kafka_principal_name}; ").replace("_HOST", params.hostname) + atlas_kafka_setup_command
+
+    if params.stack_supports_atlas_ranger_plugin:
+      Logger.info('Atlas plugin is enabled, configuring Atlas plugin.')
+      setup_ranger_atlas(upgrade_type=upgrade_type)
+    else:
+      Logger.info('Atlas plugin is not supported or enabled.')
+
+    try:
+      effective_version = get_stack_feature_version(params.config)
+
+      if check_stack_feature(StackFeature.ATLAS_HBASE_SETUP, effective_version):
+        if params.security_enabled and params.has_hbase_master:
+          Execute(secure_atlas_hbase_setup_command,
+                  tries = 5,
+                  try_sleep = 10,
+                  user=params.hbase_user
+          )
+        elif params.enable_ranger_hbase and not params.security_enabled:
+          Execute(atlas_hbase_setup_command,
+                  tries = 5,
+                  try_sleep = 10,
+                  user=params.hbase_user
+          )
+
+      if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, effective_version) and params.security_enabled:
+        try:
+          Execute(secure_atlas_kafka_setup_command,
+                  user=params.kafka_user,
+                  tries=5,
+                  try_sleep=10
+          )
+        except Fail:
+          pass  # do nothing and do not block Atlas start, fail logs would be available via Execute internals
+
+      Execute(daemon_cmd,
+              user=params.metadata_user,
+              not_if=no_op_test
+      )
+    except:
+      show_logs(params.log_dir, params.metadata_user)
+      raise
+
+  def stop(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    daemon_cmd = format('source {params.conf_dir}/atlas-env.sh; {params.metadata_stop_script}')
+
+    # If the pid dir doesn't exist, this means either
+    # 1. The user just added Atlas service and issued a restart command (stop+start). So stop should be a no-op
+    # since there's nothing to stop.
+    # OR
+    # 2. The user changed the value of the pid dir config and incorrectly issued a restart command.
+    # In which case the stop command cannot do anything since Ambari doesn't know which process to kill.
+    # The start command will spawn another instance.
+    # The user should have issued a stop, changed the config, and then started it.
+    if not os.path.isdir(params.pid_dir):
+      Logger.info("*******************************************************************")
+      Logger.info("Will skip the stop command since this is the first time stopping/restarting Atlas "
+                  "and the pid dir does not exist, %s\n" % params.pid_dir)
+      return
+
+    try:
+      Execute(daemon_cmd,
+              user=params.metadata_user,
+      )
+    except:
+      show_logs(params.log_dir, params.metadata_user)
+      raise
+
+    File(params.pid_file, action="delete")
+
+  def disable_security(self, env):
+    import params
+    if not params.zookeeper_quorum:
+      Logger.info("No zookeeper connection string. Skipping reverting ACL")
+      return
+    zkmigrator = ZkMigrator(params.zookeeper_quorum, params.java_exec, params.java64_home, params.atlas_jaas_file, params.metadata_user)
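+    # 'world:anyone:crdwa' restores open create/read/delete/write/admin ACLs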
+    zkmigrator.set_acls(params.zk_root if params.zk_root.startswith('/') else '/' + params.zk_root, 'world:anyone:crdwa')
+    if params.atlas_kafka_group_id:
+      zkmigrator.set_acls(format('/consumers/{params.atlas_kafka_group_id}'), 'world:anyone:crdwa')
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.pid_file)
+
+  def get_log_folder(self):
+    import params
+
+    return params.log_dir
+
+  def get_user(self):
+    import params
+
+    return params.metadata_user
+
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_file]
+
+if __name__ == "__main__":
+  MetadataServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/params.py
new file mode 100644
index 0000000..d26df33
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/params.py
@@ -0,0 +1,417 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import sys
+
+from ambari_commons import OSCheck
+from resource_management import get_bare_principal
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.default import default
+
+# Local Imports
+from status_params import *
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.setup_ranger_plugin_xml import generate_ranger_service_config
+
+
+def configs_for_ha(atlas_hosts, metadata_port, is_atlas_ha_enabled, metadata_protocol):
+  """
+  Return a dictionary of additional configs to merge if Atlas HA is enabled.
+  :param atlas_hosts: List of hostnames that contain Atlas
+  :param metadata_port: Port number
+  :param is_atlas_ha_enabled: None, True, or False
+  :param metadata_protocol: http or https
+  :return: Dictionary with additional configs to merge to application-properties if HA is enabled.
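+
+  For example, with atlas_hosts=["host1", "host2"], metadata_port="21000",
+  is_atlas_ha_enabled=None and metadata_protocol="http", this returns:
+    {"atlas.server.ids": "id1,id2",
+     "atlas.server.address.id1": "host1:21000",
+     "atlas.server.address.id2": "host2:21000",
+     "atlas.rest.address": "http://host1:21000,http://host2:21000",
+     "atlas.server.ha.enabled": "true"}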
+  """
+  additional_props = {}
+  if atlas_hosts is None or len(atlas_hosts) == 0 or metadata_port is None:
+    return additional_props
+
+  # Sort to guarantee each host sees the same values, assuming they are restarted at the same time.
+  atlas_hosts = sorted(atlas_hosts)
+
+  # E.g., id1,id2,id3,...,idn
+  _server_id_list = ["id" + str(i) for i in range(1, len(atlas_hosts) + 1)]
+  atlas_server_ids = ",".join(_server_id_list)
+  additional_props["atlas.server.ids"] = atlas_server_ids
+
+  i = 0
+  for curr_hostname in atlas_hosts:
+    id = _server_id_list[i]
+    prop_name = "atlas.server.address." + id
+    prop_value = curr_hostname + ":" + metadata_port
+    additional_props[prop_name] = prop_value
+    if "atlas.rest.address" in additional_props:
+      additional_props["atlas.rest.address"] += "," + metadata_protocol + "://" + prop_value
+    else:
+      additional_props["atlas.rest.address"] = metadata_protocol + "://" + prop_value
+
+    i += 1
+
+  # This may override the existing property
+  if i == 1 or (i > 1 and is_atlas_ha_enabled is False):
+    additional_props["atlas.server.ha.enabled"] = "false"
+  elif i > 1:
+    additional_props["atlas.server.ha.enabled"] = "true"
+
+  return additional_props
+  
+# server configurations
+config = Script.get_config()
+exec_tmp_dir = Script.get_tmp_dir()
+stack_root = Script.get_stack_root()
+
+# Needed since this is an Atlas Hook service.
+cluster_name = config['clusterName']
+
+java_version = expect("/hostLevelParams/java_version", int)
+
+zk_root = default('/configurations/application-properties/atlas.server.ha.zookeeper.zkroot', '/apache_atlas')
+stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
+atlas_kafka_group_id = default('/configurations/application-properties/atlas.kafka.hook.group.id', None)
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  _atlas_principal_name = config['configurations']['application-properties']['atlas.authentication.principal']
+  atlas_jaas_principal = _atlas_principal_name.replace('_HOST',_hostname_lowercase)
+  atlas_keytab_path = config['configurations']['application-properties']['atlas.authentication.keytab']
+
+# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
+version = default("/commandParams/version", None)
+
+# stack version
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+metadata_home = format('{stack_root}/current/atlas-server')
+metadata_bin = format("{metadata_home}/bin")
+
+python_binary = os.environ['PYTHON_EXE'] if 'PYTHON_EXE' in os.environ else sys.executable
+metadata_start_script = format("{metadata_bin}/atlas_start.py")
+metadata_stop_script = format("{metadata_bin}/atlas_stop.py")
+
+# metadata local directory structure
+log_dir = config['configurations']['atlas-env']['metadata_log_dir']
+
+# service locations
+hadoop_conf_dir = os.path.join(os.environ["HADOOP_HOME"], "conf") if 'HADOOP_HOME' in os.environ else '/etc/hadoop/conf'
+
+# some commands may need to supply the JAAS location when running as atlas
+atlas_jaas_file = format("{conf_dir}/atlas_jaas.conf")
+
+# user
+user_group = config['configurations']['cluster-env']['user_group']
+
+# metadata env
+java64_home = config['hostLevelParams']['java_home']
+java_exec = format("{java64_home}/bin/java")
+env_sh_template = config['configurations']['atlas-env']['content']
+
+# credential provider
+credential_provider = format( "jceks://file@{conf_dir}/atlas-site.jceks")
+
+# command line args
+ssl_enabled = default("/configurations/application-properties/atlas.enableTLS", False)
+http_port = default("/configurations/application-properties/atlas.server.http.port", "21000")
+https_port = default("/configurations/application-properties/atlas.server.https.port", "21443")
+if ssl_enabled:
+  metadata_port = https_port
+  metadata_protocol = 'https'
+else:
+  metadata_port = http_port
+  metadata_protocol = 'http'
+
+metadata_host = config['hostname']
+
+atlas_hosts = sorted(default('/clusterHostInfo/atlas_server_hosts', []))
+metadata_server_host = atlas_hosts[0] if len(atlas_hosts) > 0 else "UNKNOWN_HOST"
+
+# application properties
+application_properties = dict(config['configurations']['application-properties'])
+application_properties["atlas.server.bind.address"] = metadata_host
+
+# trimming knox_key
+if 'atlas.sso.knox.publicKey' in application_properties:
+  knox_key = application_properties['atlas.sso.knox.publicKey']
+  knox_key_without_new_line = knox_key.replace("\n","")
+  application_properties['atlas.sso.knox.publicKey'] = knox_key_without_new_line
+
+if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, version_for_stack_feature_checks):
+  metadata_server_url = application_properties["atlas.rest.address"]
+else:
+  # In HDP 2.3 and 2.4 the property was computed and saved to the local config but did not exist in the database.
+  metadata_server_url = format('{metadata_protocol}://{metadata_server_host}:{metadata_port}')
+  application_properties["atlas.rest.address"] = metadata_server_url
+
+# Atlas HA should populate
+# atlas.server.ids = id1,id2,...,idn
+# atlas.server.address.id# = host#:port
+# User should not have to modify this property, but still allow overriding it to False if multiple Atlas servers exist
+# This can be None, True, or False
+is_atlas_ha_enabled = default("/configurations/application-properties/atlas.server.ha.enabled", None)
+additional_ha_props = configs_for_ha(atlas_hosts, metadata_port, is_atlas_ha_enabled, metadata_protocol)
+for k,v in additional_ha_props.iteritems():
+  application_properties[k] = v
+
+
+metadata_env_content = config['configurations']['atlas-env']['content']
+
+metadata_opts = config['configurations']['atlas-env']['metadata_opts']
+metadata_classpath = config['configurations']['atlas-env']['metadata_classpath']
+data_dir = format("{stack_root}/current/atlas-server/data")
+expanded_war_dir = os.environ['METADATA_EXPANDED_WEBAPP_DIR'] if 'METADATA_EXPANDED_WEBAPP_DIR' in os.environ else format("{stack_root}/current/atlas-server/server/webapp")
+
+metadata_log4j_content = config['configurations']['atlas-log4j']['content']
+
+metadata_solrconfig_content = default("/configurations/atlas-solrconfig/content", None)
+
+atlas_log_level = config['configurations']['atlas-log4j']['atlas_log_level']
+audit_log_level = config['configurations']['atlas-log4j']['audit_log_level']
+atlas_log_max_backup_size = default("/configurations/atlas-log4j/atlas_log_max_backup_size", 256)
+atlas_log_number_of_backup_files = default("/configurations/atlas-log4j/atlas_log_number_of_backup_files", 20)
+
+# smoke test
+smoke_test_user = config['configurations']['cluster-env']['smokeuser']
+smoke_test_password = 'smoke'
+smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+
+
+security_check_status_file = format('{log_dir}/security_check.status')
+
+# hbase
+hbase_conf_dir = "/etc/hbase/conf"
+
+atlas_search_backend = default("/configurations/application-properties/atlas.graph.index.search.backend", "")
+search_backend_solr = atlas_search_backend.startswith('solr')
+
+# infra solr
+infra_solr_znode = default("/configurations/infra-solr-env/infra_solr_znode", None)
+infra_solr_hosts = default("/clusterHostInfo/infra_solr_hosts", [])
+infra_solr_replication_factor = 2 if len(infra_solr_hosts) > 1 else 1
+atlas_solr_shards = default("/configurations/atlas-env/atlas_solr-shards", 1)
+has_infra_solr = len(infra_solr_hosts) > 0
+infra_solr_role_atlas = default('configurations/infra-solr-security-json/infra_solr_role_atlas', 'atlas_user')
+infra_solr_role_dev = default('configurations/infra-solr-security-json/infra_solr_role_dev', 'dev')
+infra_solr_role_ranger_audit = default('configurations/infra-solr-security-json/infra_solr_role_ranger_audit', 'ranger_audit_user')
+
+# zookeeper
+zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
+
+# build a comma-separated zookeeper quorum string from clusterHostInfo, e.g. "zk1:2181,zk2:2181"
+index = 0
+zookeeper_quorum = ""
+for host in zookeeper_hosts:
+  zookeeper_host = host
+  if zookeeper_port is not None:
+    zookeeper_host = host + ":" + str(zookeeper_port)
+
+  zookeeper_quorum += zookeeper_host
+  index += 1
+  if index < len(zookeeper_hosts):
+    zookeeper_quorum += ","
+
+stack_supports_atlas_hdfs_site_on_namenode_ha = check_stack_feature(StackFeature.ATLAS_HDFS_SITE_ON_NAMENODE_HA, version_for_stack_feature_checks)
+
+atlas_server_xmx = default("configurations/atlas-env/atlas_server_xmx", 2048)
+atlas_server_max_new_size = default("configurations/atlas-env/atlas_server_max_new_size", 614)
+
+hbase_master_hosts = default('/clusterHostInfo/hbase_master_hosts', [])
+has_hbase_master = not len(hbase_master_hosts) == 0
+
+atlas_hbase_setup = format("{exec_tmp_dir}/atlas_hbase_setup.rb")
+atlas_kafka_setup = format("{exec_tmp_dir}/atlas_kafka_acl.sh")
+atlas_graph_storage_hbase_table = default('/configurations/application-properties/atlas.graph.storage.hbase.table', None)
+atlas_audit_hbase_tablename = default('/configurations/application-properties/atlas.audit.hbase.tablename', None)
+
+hbase_user_keytab = default('/configurations/hbase-env/hbase_user_keytab', None)
+hbase_principal_name = default('/configurations/hbase-env/hbase_principal_name', None)
+
+# ToDo: Kafka port to Atlas
+# Used while upgrading the stack in a kerberized cluster and running kafka-acls.sh
+hosts_with_kafka = default('/clusterHostInfo/kafka_broker_hosts', [])
+host_with_kafka = hostname in hosts_with_kafka
+
+ranger_tagsync_hosts = default("/clusterHostInfo/ranger_tagsync_hosts", [])
+has_ranger_tagsync = len(ranger_tagsync_hosts) > 0
+rangertagsync_user = "rangertagsync"
+
+kafka_keytab = default('/configurations/kafka-env/kafka_keytab', None)
+kafka_principal_name = default('/configurations/kafka-env/kafka_principal_name', None)
+default_replication_factor = default('/configurations/application-properties/atlas.notification.replicas', None)
+
+if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, version_for_stack_feature_checks):
+  default_replication_factor = default('/configurations/application-properties/atlas.notification.replicas', None)
+
+  kafka_env_sh_template = config['configurations']['kafka-env']['content']
+  kafka_home = os.path.join(stack_root,  "current", "kafka-broker")
+  kafka_conf_dir = os.path.join(kafka_home, "config")
+
+  kafka_zk_endpoint = default("/configurations/kafka-broker/zookeeper.connect", None)
+  kafka_kerberos_enabled = (('security.inter.broker.protocol' in config['configurations']['kafka-broker']) and
+                            ((config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "PLAINTEXTSASL") or
+                             (config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "SASL_PLAINTEXT")))
+  if security_enabled and stack_version_formatted != "" and 'kafka_principal_name' in config['configurations']['kafka-env'] \
+    and check_stack_feature(StackFeature.KAFKA_KERBEROS, stack_version_formatted):
+    _hostname_lowercase = config['hostname'].lower()
+    _kafka_principal_name = config['configurations']['kafka-env']['kafka_principal_name']
+    kafka_jaas_principal = _kafka_principal_name.replace('_HOST', _hostname_lowercase)
+    kafka_keytab_path = config['configurations']['kafka-env']['kafka_keytab']
+    kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
+    kafka_kerberos_params = "-Djava.security.auth.login.config={0}/kafka_jaas.conf".format(kafka_conf_dir)
+  else:
+    kafka_kerberos_params = ''
+    kafka_jaas_principal = None
+    kafka_keytab_path = None
+
+namenode_host = set(default("/clusterHostInfo/namenode_host", []))
+has_namenode = not len(namenode_host) == 0
+
+# ranger atlas plugin section start
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+retry_enabled = default("/commandParams/command_retry_enabled", False)
+
+stack_supports_atlas_ranger_plugin = check_stack_feature(StackFeature.ATLAS_RANGER_PLUGIN_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+
+# whether Ranger supports xml configurations, determined via a stack feature instead of the xml_configurations_supported property in ranger-env
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ranger atlas plugin enabled property
+enable_ranger_atlas = default("/configurations/ranger-atlas-plugin-properties/ranger-atlas-plugin-enabled", "No")
+enable_ranger_atlas = True if enable_ranger_atlas.lower() == "yes" else False
+
+# ranger hbase plugin enabled property
+enable_ranger_hbase = default("/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled", "No")
+enable_ranger_hbase = True if enable_ranger_hbase.lower() == 'yes' else False
+
+if stack_supports_atlas_ranger_plugin and enable_ranger_atlas:
+  # for create_hdfs_directory
+  hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
+  hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']  if has_namenode else None
+  hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
+  hdfs_site = config['configurations']['hdfs-site']
+  default_fs = config['configurations']['core-site']['fs.defaultFS']
+  dfs_type = default("/commandParams/dfs_type", "")
+
+  import functools
+  from resource_management.libraries.resources.hdfs_resource import HdfsResource
+  from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+  # create a partial function with common arguments for every HdfsResource call;
+  # code that needs an HDFS directory calls params.HdfsResource
+
+  HdfsResource = functools.partial(
+    HdfsResource,
+    user = hdfs_user,
+    hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+    security_enabled = security_enabled,
+    keytab = hdfs_user_keytab,
+    kinit_path_local = kinit_path_local,
+    hadoop_bin_dir = hadoop_bin_dir,
+    hadoop_conf_dir = hadoop_conf_dir,
+    principal_name = hdfs_principal_name,
+    hdfs_site = hdfs_site,
+    default_fs = default_fs,
+    immutable_paths = get_not_managed_resources(),
+    dfs_type = dfs_type
+  )
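+  # used elsewhere as, e.g.:
+  #   params.HdfsResource("/ranger/audit", type="directory", action="create_on_execute")
+  #   params.HdfsResource(None, action="execute")  # flushes the batched operations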
+
+  # ranger atlas service/repository name
+  repo_name = str(config['clusterName']) + '_atlas'
+  repo_name_value = config['configurations']['ranger-atlas-security']['ranger.plugin.atlas.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  ssl_keystore_password = config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']
+  ssl_truststore_password = config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+  xa_audit_hdfs_is_enabled = default('/configurations/ranger-atlas-audit/xasecure.audit.destination.hdfs', False)
+
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['ranger-atlas-security']['ranger.plugin.atlas.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  downloaded_custom_connector = None
+  driver_curl_source = None
+  driver_curl_target = None
+
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config having external ranger credential properties
+  if not has_ranger_admin and enable_ranger_atlas:
+    external_admin_username = default('/configurations/ranger-atlas-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-atlas-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-atlas-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-atlas-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-atlas-plugin-properties']
+  ranger_atlas_audit = config['configurations']['ranger-atlas-audit']
+  ranger_atlas_audit_attrs = config['configuration_attributes']['ranger-atlas-audit']
+  ranger_atlas_security = config['configurations']['ranger-atlas-security']
+  ranger_atlas_security_attrs = config['configuration_attributes']['ranger-atlas-security']
+  ranger_atlas_policymgr_ssl = config['configurations']['ranger-atlas-policymgr-ssl']
+  ranger_atlas_policymgr_ssl_attrs = config['configuration_attributes']['ranger-atlas-policymgr-ssl']
+
+  policy_user = config['configurations']['ranger-atlas-plugin-properties']['policy_user']
+
+  atlas_repository_configuration = {
+    'username' : config['configurations']['ranger-atlas-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
+    'password' : unicode(config['configurations']['ranger-atlas-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
+    'atlas.rest.address' : metadata_server_url,
+    'commonNameForCertificate' : config['configurations']['ranger-atlas-plugin-properties']['common.name.for.certificate'],
+    'ambari.service.check.user' : policy_user
+  }
+
+  custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
+  if len(custom_ranger_service_config) > 0:
+    atlas_repository_configuration.update(custom_ranger_service_config)
+
+  if security_enabled:
+    atlas_repository_configuration['policy.download.auth.users'] = metadata_user
+    atlas_repository_configuration['tag.download.auth.users'] = metadata_user
+
+  atlas_ranger_plugin_repo = {
+    'isEnabled': 'true',
+    'configs': atlas_repository_configuration,
+    'description': 'atlas repo',
+    'name': repo_name,
+    'type': 'atlas',
+    }
+# ranger atlas plugin section end
+# atlas admin login username password
+atlas_admin_username = config['configurations']['atlas-env']['atlas.admin.username']
+atlas_admin_password = config['configurations']['atlas-env']['atlas.admin.password']

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..cada8c3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/service_check.py
@@ -0,0 +1,55 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.logger import Logger  
+from resource_management.core.resources.system import Execute
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.exceptions import Fail
+
+class AtlasServiceCheck(Script):
+
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}"),
+              user=params.smoke_test_user)
+    atlas_host_call_count = 0
+
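+    # probe every Atlas host; the check fails only if all of them are unreachable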
+    for atlas_host in params.atlas_hosts:
+      if params.security_enabled:
+        smoke_cmd = format('curl -k --negotiate -u : -b ~/cookiejar.txt -c ~/cookiejar.txt -s -o /dev/null -w "%{{http_code}}" {metadata_protocol}://{atlas_host}:{metadata_port}/')
+      else:
+        smoke_cmd = format('curl -k -s -o /dev/null -w "%{{http_code}}" {metadata_protocol}://{atlas_host}:{metadata_port}/')
+      try:
+        Execute(smoke_cmd, user=params.smoke_test_user, tries=5,
+                try_sleep=10)
+      except Exception, err:
+        atlas_host_call_count += 1
+        Logger.error("ATLAS service check failed for host {0} with error {1}".format(atlas_host, err))
+    if atlas_host_call_count == len(params.atlas_hosts):
+      raise Fail("All instances of ATLAS METADATA SERVER are down.")
+
+
+if __name__ == "__main__":
+  AtlasServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/setup_ranger_atlas.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/setup_ranger_atlas.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/setup_ranger_atlas.py
new file mode 100644
index 0000000..c47c75c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/setup_ranger_atlas.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+from resource_management.core.logger import Logger
+
+def setup_ranger_atlas(upgrade_type=None):
+  import params
+
+  if params.enable_ranger_atlas:
+
+    from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+
+    if params.retry_enabled:
+      Logger.info("ATLAS: Setup ranger: command retry is enabled, thus retrying if Ranger admin is down!")
+    else:
+      Logger.info("ATLAS: Setup ranger: command retry is not enabled, thus skipping if Ranger admin is down!")
+
+    if params.enable_ranger_atlas and params.xa_audit_hdfs_is_enabled:
+      if params.has_namenode:
+        params.HdfsResource("/ranger/audit",
+                            type="directory",
+                            action="create_on_execute",
+                            owner=params.metadata_user,
+                            group=params.user_group,
+                            mode=0755,
+                            recursive_chmod=True
+        )
+        params.HdfsResource("/ranger/audit/atlas",
+                            type="directory",
+                            action="create_on_execute",
+                            owner=params.metadata_user,
+                            group=params.user_group,
+                            mode=0700,
+                            recursive_chmod=True
+        )
+        params.HdfsResource(None, action="execute")
+
+    setup_ranger_plugin('atlas-server', 'atlas', None,
+                        params.downloaded_custom_connector, params.driver_curl_source,
+                        params.driver_curl_target, params.java64_home,
+                        params.repo_name, params.atlas_ranger_plugin_repo,
+                        params.ranger_env, params.ranger_plugin_properties,
+                        params.policy_user, params.policymgr_mgr_url,
+                        params.enable_ranger_atlas, conf_dict=params.conf_dir,
+                        component_user=params.metadata_user, component_group=params.user_group, cache_service_list=['atlas'],
+                        plugin_audit_properties=params.config['configurations']['ranger-atlas-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-atlas-audit'],
+                        plugin_security_properties=params.config['configurations']['ranger-atlas-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-atlas-security'],
+                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-atlas-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-atlas-policymgr-ssl'],
+                        component_list=['atlas-server'], audit_db_is_enabled=False,
+                        credential_file=params.credential_file, xa_audit_db_password=None,
+                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                        api_version='v2', skip_if_rangeradmin_down=not params.retry_enabled, is_security_enabled=params.security_enabled,
+                        is_stack_supports_ranger_kerberos=params.stack_supports_ranger_kerberos,
+                        component_user_principal=params.atlas_jaas_principal if params.security_enabled else None,
+                        component_user_keytab=params.atlas_keytab_path if params.security_enabled else None)
+  else:
+    Logger.info('Ranger Atlas plugin is not enabled')
\ No newline at end of file
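The HdfsResource calls above follow the usual Ambari queue-and-flush idiom: each call with action="create_on_execute" only records a request, and the trailing HdfsResource(None, action="execute") applies the whole batch at once. A minimal sketch of how params.HdfsResource is typically constructed for this to work; the keyword values below are assumed stand-ins, not taken from this patch:

    # Sketch, assuming standard stack params (hdfs_user etc. are stand-ins):
    # params.HdfsResource is usually a functools.partial over the base
    # HdfsResource resource, so call sites only pass what varies.
    import functools
    from resource_management.libraries.resources.hdfs_resource import (
        HdfsResource as HdfsResourceBase)

    hdfs_user = "hdfs"                                     # assumed
    hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"  # assumed
    hadoop_conf_dir = "/etc/hadoop/conf"                   # assumed

    HdfsResource = functools.partial(HdfsResourceBase,
                                     user=hdfs_user,
                                     hadoop_bin_dir=hadoop_bin_dir,
                                     hadoop_conf_dir=hadoop_conf_dir)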

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/status_params.py
new file mode 100644
index 0000000..852a9cb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/status_params.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import get_kinit_path, format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+
+from resource_management.libraries.functions.stack_features import check_stack_feature, get_stack_feature_version
+from resource_management.libraries.functions import StackFeature
+
+
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+default_conf_file = "application.properties"
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, version_for_stack_feature_checks):
+  default_conf_file = "atlas-application.properties"
+
+conf_file = default("/configurations/atlas-env/metadata_conf_file", default_conf_file)
+conf_dir = format("{stack_root}/current/atlas-server/conf")
+pid_dir = default("/configurations/atlas-env/metadata_pid_dir", "/var/run/atlas")
+pid_file = format("{pid_dir}/atlas.pid")
+
+metadata_user = default("/configurations/atlas-env/metadata_user", None)
+hbase_user = default("/configurations/hbase-env/hbase_user", None)
+kafka_user = default("/configurations/kafka-env/kafka_user", None)
+
+# Security related/required params
+hostname = config['hostname']
+security_enabled = default("/configurations/cluster-env/security_enabled", None)
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
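Most lookups in this file go through default(), which resolves a '/'-separated path inside the command configuration and returns the fallback when any segment is missing. A rough, self-contained illustration of that behavior, using a plain dict instead of Ambari's real command JSON:

    # Rough approximation of resource_management's default() semantics:
    # walk a '/'-separated path through the config dict, else return fallback.
    def default_like(config, path, fallback):
        node = config
        for part in path.strip("/").split("/"):
            if not isinstance(node, dict) or part not in node:
                return fallback
            node = node[part]
        return node

    config = {"configurations": {"atlas-env": {"metadata_pid_dir": "/var/run/atlas"}}}
    assert default_like(config, "/configurations/atlas-env/metadata_pid_dir", "/tmp") == "/var/run/atlas"
    assert default_like(config, "/configurations/atlas-env/missing_key", "/tmp") == "/tmp"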

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_hbase_setup.rb.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_hbase_setup.rb.j2 b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_hbase_setup.rb.j2
new file mode 100644
index 0000000..14167dc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_hbase_setup.rb.j2
@@ -0,0 +1,42 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+_tbl_titan = '{{atlas_graph_storage_hbase_table}}'
+_tbl_audit = '{{atlas_audit_hbase_tablename}}'
+_usr_atlas = '{{metadata_user}}'
+
+
+if not list.include? _tbl_titan
+  begin
+    create _tbl_titan,{NAME => 'e',DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION =>'GZ', BLOOMFILTER =>'ROW'},{NAME => 'g',DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION =>'GZ', BLOOMFILTER =>'ROW'},{NAME => 'i',DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION =>'GZ', BLOOMFILTER =>'ROW'},{NAME => 's',DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION =>'GZ', BLOOMFILTER =>'ROW'},{NAME => 'm',DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION =>'GZ', BLOOMFILTER =>'ROW'},{NAME => 'l',DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION =>'GZ', BLOOMFILTER =>'ROW', TTL => 604800, KEEP_DELETED_CELLS =>false}
+  rescue RuntimeError => e
+    raise e if not e.message.include? "Table already exists"
+  end
+end
+
+
+if not list.include? _tbl_audit
+  begin
+    create _tbl_audit, {NAME => 'dt', DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION =>'GZ', BLOOMFILTER =>'ROW'}
+  rescue RuntimeError => e
+    raise e if not e.message.include? "Table already exists"
+  end
+end
+
+grant _usr_atlas, 'RWCA', _tbl_titan
+grant _usr_atlas, 'RWCA', _tbl_audit
\ No newline at end of file
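The template above renders to an HBase shell script that idempotently creates the Titan graph and audit tables and grants the Atlas user RWCA on both. Roughly how such a script can be fed to the HBase shell from a management script; this is assumed wiring, since the invocation in metadata_server.py is not shown in this excerpt:

    # Assumed wiring (metadata_server.py is not part of this excerpt):
    # render the template, then pipe the result into a non-interactive
    # hbase shell as the HBase service user.
    import params  # Ambari agent-side service params module
    from resource_management.core.resources.system import Execute, File
    from resource_management.core.source import Template
    from resource_management.libraries.functions.format import format

    atlas_hbase_setup = params.tmp_dir + "/atlas_hbase_setup.rb"  # assumed path
    File(atlas_hbase_setup,
         content=Template("atlas_hbase_setup.rb.j2"),
         owner=params.hbase_user)
    Execute(format("cat {atlas_hbase_setup} | hbase shell -n"),
            tries=5, try_sleep=10,  # HBase may still be starting up
            user=params.hbase_user)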

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_jaas.conf.j2
new file mode 100644
index 0000000..68eb088
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   useTicketCache=false
+   storeKey=true
+   doNotPrompt=false
+   keyTab="{{atlas_keytab_path}}"
+   principal="{{atlas_jaas_principal}}";
+};
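This JAAS "Client" section is what lets the HBase/ZooKeeper client inside Atlas authenticate from the keytab rather than a ticket cache. A sketch of the rendering step and of how the file reaches the JVM; the wiring is assumed, not shown in this excerpt:

    # Assumed wiring: render the template into the Atlas conf dir; the
    # Atlas JVM then loads it via the standard JAAS system property.
    import params  # Ambari agent-side service params module
    from resource_management.core.resources.system import File
    from resource_management.core.source import Template

    File(params.conf_dir + "/atlas_jaas.conf",
         content=Template("atlas_jaas.conf.j2"),
         owner=params.metadata_user,
         group=params.user_group,
         mode=0644)
    # Typically activated with a JVM flag along the lines of:
    #   -Djava.security.auth.login.config=<conf_dir>/atlas_jaas.conf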

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_kafka_acl.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_kafka_acl.sh.j2 b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_kafka_acl.sh.j2
new file mode 100644
index 0000000..6a2edc6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_kafka_acl.sh.j2
@@ -0,0 +1,41 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+#!/bin/bash
+
+
+create_topic() {
+    topic_name=$1
+    topics=`{{kafka_home}}/bin/kafka-topics.sh --zookeeper {{kafka_zk_endpoint}} --topic $topic_name --list`
+    if [ -z "$topics" ]; then
+      {{kafka_home}}/bin/kafka-topics.sh --zookeeper {{kafka_zk_endpoint}} --topic $topic_name --create --partitions 1 --replication-factor {{default_replication_factor}}
+      echo "Created topic $topic_name with replication factor {{default_replication_factor}}"
+    else
+      echo "Topic $topic_name already exists"
+    fi
+}
+
+create_topic ATLAS_HOOK
+create_topic ATLAS_ENTITIES
+
+{{kafka_home}}/bin/kafka-acls.sh --authorizer-properties zookeeper.connect={{kafka_zk_endpoint}} --add --topic ATLAS_HOOK --allow-principal 'User:*' --producer
+{{kafka_home}}/bin/kafka-acls.sh --authorizer-properties zookeeper.connect={{kafka_zk_endpoint}} --add --topic ATLAS_HOOK --allow-principal User:{{metadata_user}} --consumer --group atlas
+{{kafka_home}}/bin/kafka-acls.sh --authorizer-properties zookeeper.connect={{kafka_zk_endpoint}} --add --topic ATLAS_ENTITIES --allow-principal User:{{metadata_user}} --producer
+
+{% if has_ranger_tagsync %}
+{{kafka_home}}/bin/kafka-acls.sh --authorizer-properties zookeeper.connect={{kafka_zk_endpoint}} --add --topic ATLAS_ENTITIES --allow-principal User:{{rangertagsync_user}} --consumer --group ranger_entities_consumer
+{% endif %}
\ No newline at end of file
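This script pre-creates the ATLAS_HOOK and ATLAS_ENTITIES topics and grants the matching producer/consumer ACLs, which matters on kerberized clusters where hook principals cannot auto-create topics. A sketch of how it might be kicked off from a management script; the script path and the Kerberos step are assumptions, not taken from this patch:

    # Assumed invocation: kinit as the Kafka service user, then run the
    # rendered ACL script so topics and grants exist before Atlas starts.
    import params  # Ambari agent-side service params module
    from resource_management.core.resources.system import Execute

    atlas_kafka_setup = params.tmp_dir + "/atlas_kafka_acl.sh"  # assumed path
    cmd = "%s -kt %s %s; bash %s" % (params.kinit_path_local,
                                     params.kafka_keytab_path,
                                     params.kafka_jaas_principal,
                                     atlas_kafka_setup)
    Execute(cmd, user=params.kafka_user)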

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/input.config-atlas.json.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/input.config-atlas.json.j2 b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/input.config-atlas.json.j2
new file mode 100644
index 0000000..2d977b9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/input.config-atlas.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"atlas_app",
+      "rowtype":"service",
+      "path":"{{default('/configurations/atlas-env/metadata_log_dir', '/var/log/atlas')}}/application.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "atlas_app"
+          ]
+        }
+      },
+      "log4j_format":"%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{SPACE}-%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}~%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
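Given the log4j conversion pattern above, a typical Atlas line looks like "2017-05-08 20:46:16,123 INFO  - [main:] ~ Server started (Main:120)". A rough Python approximation of the grok message_pattern, useful only for eyeballing the field split; Log Search applies its own grok engine, not this regex:

    # Rough stand-in for the grok message_pattern above: split an Atlas
    # log line into logtime, level, thread_name and log_message.
    import re

    LINE = "2017-05-08 20:46:16,123 INFO  - [main:] ~ Server started (Main:120)"
    PATTERN = re.compile(
        r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
        r"(?P<level>[A-Z]+)\s+-\s+\[(?P<thread_name>[^\]]*)\]\s+~\s+"
        r"(?P<log_message>.*)$")

    m = PATTERN.match(LINE)
    assert m and m.group("level") == "INFO"
    assert m.group("logtime") == "2017-05-08 20:46:16,123"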

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/kafka_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/kafka_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/kafka_jaas.conf.j2
new file mode 100644
index 0000000..56c558d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/kafka_jaas.conf.j2
@@ -0,0 +1,41 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+KafkaServer {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{kafka_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="{{kafka_bare_jaas_principal}}"
+   principal="{{kafka_jaas_principal}}";
+};
+KafkaClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useTicketCache=true
+   renewTicket=true
+   serviceName="{{kafka_bare_jaas_principal}}";
+};
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{kafka_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="{{kafka_jaas_principal}}";
+};
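Note the asymmetry above: KafkaClient relies on an existing ticket cache (useTicketCache=true), while KafkaServer and the ZooKeeper Client section authenticate directly from the keytab. Since every section assumes Kerberos, the file is only worth materializing on secure clusters; a sketch of that guard, with an assumed target file name:

    # Assumed wiring: only write the Kafka JAAS file when Kerberos is on,
    # since all three login sections require a principal and keytab/TGT.
    import params  # Ambari agent-side service params module
    from resource_management.core.resources.system import File
    from resource_management.core.source import Template

    if params.security_enabled:
        File(params.conf_dir + "/kafka_jaas.conf",  # assumed target name
             content=Template("kafka_jaas.conf.j2"),
             owner=params.metadata_user,
             group=params.user_group)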

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..0a7d0a0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/quicklinks/quicklinks.json
@@ -0,0 +1,36 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"atlas.enableTLS",
+          "desired":"true",
+          "site":"application-properties"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "atlas_dashboard",
+        "label": "Atlas Dashboard",
+        "requires_user_name": "true",
+        "component_name": "ATLAS_SERVER",
+        "url": "%@://%@:%@/",
+        "attributes": ["authenticated", "sso"],
+        "port":{
+          "http_property": "atlas.server.http.port",
+          "http_default_port": "21000",
+          "https_property": "atlas.server.https.port",
+          "https_default_port": "21443",
+          "regex": "^(\\d+)$",
+          "site": "application-properties"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
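The net effect: the dashboard link uses https on atlas.server.https.port (default 21443) only when atlas.enableTLS is "true" in application-properties, and otherwise http on atlas.server.http.port (default 21000). An illustrative resolver mirroring that selection; this is not Ambari's actual quick-link code:

    # Illustrative only: resolve the Atlas quick link the way the
    # protocol check and port block above describe.
    app_props = {"atlas.enableTLS": "true", "atlas.server.https.port": "21443"}

    def atlas_quicklink(props, host):
        https = props.get("atlas.enableTLS") == "true"
        if https:
            port = props.get("atlas.server.https.port", "21443")
        else:
            port = props.get("atlas.server.http.port", "21000")
        return "%s://%s:%s/" % ("https" if https else "http", host, port)

    print(atlas_quicklink(app_props, "atlas-host.example.com"))
    # -> https://atlas-host.example.com:21443/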

http://git-wip-us.apache.org/repos/asf/ambari/blob/42a542a5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/role_command_order.json
new file mode 100644
index 0000000..4d66dfc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/role_command_order.json
@@ -0,0 +1,7 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for ATLAS",
+    "ATLAS_SERVICE_CHECK-SERVICE_CHECK": ["ATLAS_SERVER-START"],
+    "ATLAS_SERVER-START": ["KAFKA_BROKER-START", "INFRA_SOLR-START", "HBASE_MASTER-START", "HBASE_REGIONSERVER-START"]
+  }
+}