Posted to commits@ambari.apache.org by rl...@apache.org on 2017/10/03 20:16:02 UTC

[15/22] ambari git commit: AMBARI-22095 Make hooks stack agnostic (dsen)

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stack-hooks/before-START/files/checkForFormat.sh
new file mode 100644
index 0000000..68aa96d
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/files/checkForFormat.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export bin_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
+  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
+    (( EXIT_CODE = $EXIT_CODE | $? ))
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+

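For reference, the script takes the HDFS user, the Hadoop conf and bin directories, a marker directory, and a comma-separated list of NameNode data directories as positional arguments. A minimal sketch of an invocation (all paths below are illustrative, not taken from this commit):

    # Illustrative only: format the NameNode unless it was formatted before
    bash checkForFormat.sh hdfs /etc/hadoop/conf /usr/hdp/current/hadoop-client/bin \
        /var/lib/hdfs/namenode-formatted /hadoop/hdfs/namenode
    # Exit status: 0 if formatting ran or was already done; non-zero if any
    # NameNode directory is non-empty or the format command itself fails.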
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar b/ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar
new file mode 100644
index 0000000..6c993bf
Binary files /dev/null and b/ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar differ

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stack-hooks/before-START/files/task-log4j.properties
new file mode 100644
index 0000000..7e12962
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/files/task-log4j.properties
@@ -0,0 +1,134 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use the following logger to send the job summary to a separate file, defined by
+# hadoop.mapreduce.jobsummary.log.file and rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Roll over at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+ 
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

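The defaults at the top of this file (hadoop.root.logger, hadoop.log.dir, hadoop.log.file) are meant to be overridden per process through Java system properties. A minimal sketch, assuming a standard Hadoop client shell (values illustrative):

    # Raise logging to DEBUG on the console for one session
    export HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=DEBUG,console"
    # Redirect the file appender to a custom location
    export HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=/tmp/hadoop-logs -Dhadoop.log.file=task.log"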
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/files/topology_script.py b/ambari-server/src/main/resources/stack-hooks/before-START/files/topology_script.py
new file mode 100644
index 0000000..0f7a55c
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/files/topology_script.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import sys, os
+from string import join
+import ConfigParser
+
+
+DEFAULT_RACK = "/default-rack"
+DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
+SECTION_NAME = "network_topology"
+
+class TopologyScript():
+
+  def load_rack_map(self):
+    try:
+      #RACK_MAP contains both host name vs rack and ip vs rack mappings
+      mappings = ConfigParser.ConfigParser()
+      mappings.read(DATA_FILE_NAME)
+      return dict(mappings.items(SECTION_NAME))
+    except ConfigParser.NoSectionError:
+      return {}
+
+  def get_racks(self, rack_map, args):
+    if len(args) == 1:
+      return DEFAULT_RACK
+    else:
+      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]],)
+
+  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
+    #try looking up by hostname
+    rack = rack_map.get(hostname_or_ip)
+    if rack is not None:
+      return rack
+    #try looking up by ip
+    rack = rack_map.get(self.extract_ip(hostname_or_ip))
+    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
+    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
+
+  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
+  def extract_ip(self, container_string):
+    return container_string.split("/")[0].split(":")[0]
+
+  def execute(self, args):
+    rack_map = self.load_rack_map()
+    rack = self.get_racks(rack_map, args)
+    print rack
+
+if __name__ == "__main__":
+  TopologyScript().execute(sys.argv)

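The script resolves each argument against the [network_topology] section of topology_mappings.data located next to it, falling back to /default-rack for unknown hosts. A quick manual test (Python 2, since the script uses the print statement; all host names and racks below are illustrative):

    cat > topology_mappings.data <<'EOF'
    [network_topology]
    host1.example.com=/rack01
    10.0.0.11=/rack01
    EOF

    python topology_script.py host1.example.com 10.0.0.99
    # prints: /rack01 /default-rack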
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py
new file mode 100644
index 0000000..04299ba
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py
@@ -0,0 +1,173 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.core.resources import Directory
+from resource_management.core.resources import Execute
+from resource_management.libraries.functions import default
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import format
+
+
+DEFAULT_HADOOP_HDFS_EXTENSION_DIR = "/hdp/ext/{0}/hadoop"
+DEFAULT_HADOOP_HIVE_EXTENSION_DIR = "/hdp/ext/{0}/hive"
+DEFAULT_HADOOP_HBASE_EXTENSION_DIR = "/hdp/ext/{0}/hbase"
+
+def setup_extensions():
+  """
+  The goal of this method is to distribute extensions (for example, jar files) from
+  HDFS (/hdp/ext/{major_stack_version}/{service_name}) to all nodes that contain related
+  components of a service (YARN, HIVE, or HBASE). Extensions must be added to HDFS
+  manually by the user.
+  """
+
+  import params
+
+  # Hadoop Custom extensions
+  hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
+  hadoop_custom_extensions_services = default("/configurations/core-site/hadoop.custom-extensions.services", "")
+  hadoop_custom_extensions_owner = default("/configurations/core-site/hadoop.custom-extensions.owner", params.hdfs_user)
+  hadoop_custom_extensions_hdfs_dir = get_config_formatted_value(default("/configurations/core-site/hadoop.custom-extensions.root",
+                                                 DEFAULT_HADOOP_HDFS_EXTENSION_DIR.format(params.major_stack_version)))
+  hadoop_custom_extensions_services = [ service.strip().upper() for service in hadoop_custom_extensions_services.split(",") ]
+  hadoop_custom_extensions_services.append("YARN")
+
+  hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(Script.get_stack_root())
+
+  if params.current_service in hadoop_custom_extensions_services:
+    clean_extensions(hadoop_custom_extensions_local_dir)
+    if hadoop_custom_extensions_enabled:
+      download_extensions(hadoop_custom_extensions_owner, params.user_group,
+                          hadoop_custom_extensions_hdfs_dir,
+                          hadoop_custom_extensions_local_dir)
+
+  setup_extensions_hive()
+
+  hbase_custom_extensions_services = []
+  hbase_custom_extensions_services.append("HBASE")
+  if params.current_service in hbase_custom_extensions_services:
+    setup_hbase_extensions()
+
+
+def setup_hbase_extensions():
+  import params
+
+  # HBase Custom extensions
+  hbase_custom_extensions_enabled = default("/configurations/hbase-site/hbase.custom-extensions.enabled", False)
+  hbase_custom_extensions_owner = default("/configurations/hbase-site/hbase.custom-extensions.owner", params.hdfs_user)
+  hbase_custom_extensions_hdfs_dir = get_config_formatted_value(default("/configurations/hbase-site/hbase.custom-extensions.root",
+                                                DEFAULT_HADOOP_HBASE_EXTENSION_DIR.format(params.major_stack_version)))
+  hbase_custom_extensions_local_dir = "{0}/current/ext/hbase".format(Script.get_stack_root())
+
+  impacted_components = ['HBASE_MASTER', 'HBASE_REGIONSERVER', 'PHOENIX_QUERY_SERVER']
+  role = params.config.get('role','')
+
+  if role in impacted_components:
+    clean_extensions(hbase_custom_extensions_local_dir)
+    if hbase_custom_extensions_enabled:
+      download_extensions(hbase_custom_extensions_owner, params.user_group,
+                          hbase_custom_extensions_hdfs_dir,
+                          hbase_custom_extensions_local_dir)
+
+
+def setup_extensions_hive():
+  import params
+
+  hive_custom_extensions_enabled = default("/configurations/hive-site/hive.custom-extensions.enabled", False)
+  hive_custom_extensions_owner = default("/configurations/hive-site/hive.custom-extensions.owner", params.hdfs_user)
+  hive_custom_extensions_hdfs_dir = DEFAULT_HADOOP_HIVE_EXTENSION_DIR.format(params.major_stack_version)
+
+  hive_custom_extensions_local_dir = "{0}/current/ext/hive".format(Script.get_stack_root())
+
+  impacted_components = ['HIVE_SERVER', 'HIVE_CLIENT']
+  role = params.config.get('role','')
+
+  # Run copying for HIVE_SERVER and HIVE_CLIENT
+  if params.current_service == 'HIVE' and role in impacted_components:
+    clean_extensions(hive_custom_extensions_local_dir)
+    if hive_custom_extensions_enabled:
+      download_extensions(hive_custom_extensions_owner, params.user_group,
+                          hive_custom_extensions_hdfs_dir,
+                          hive_custom_extensions_local_dir)
+
+def download_extensions(owner_user, owner_group, hdfs_source_dir, local_target_dir):
+  """
+  :param owner_user: user owner of the HDFS directory
+  :param owner_group: group owner of the HDFS directory
+  :param hdfs_source_dir: the HDFS directory from which the files are pulled
+  :param local_target_dir: the local directory to download the files into
+  :return: Will return True if successful, otherwise, False.
+  """
+  import params
+
+  if not os.path.isdir(local_target_dir):
+    extensions_tmp_dir=format("{tmp_dir}/custom_extensions")
+    Directory(local_target_dir,
+              owner="root",
+              mode=0755,
+              group="root",
+              create_parents=True)
+
+    params.HdfsResource(hdfs_source_dir,
+                        type="directory",
+                        action="create_on_execute",
+                        owner=owner_user,
+                        group=owner_group,
+                        mode=0755)
+
+    Directory(extensions_tmp_dir,
+              owner=params.hdfs_user,
+              mode=0755,
+              create_parents=True)
+
+    # copy from hdfs to /tmp
+    params.HdfsResource(extensions_tmp_dir,
+                        type="directory",
+                        action="download_on_execute",
+                        source=hdfs_source_dir,
+                        user=params.hdfs_user,
+                        mode=0644,
+                        replace_existing_files=True)
+
+    # The Execute resource does not quote arguments correctly, so build the command string manually.
+    cmd = format("{sudo} mv {extensions_tmp_dir}/* {local_target_dir}")
+    only_if_cmd = "ls -d {extensions_tmp_dir}/*".format(extensions_tmp_dir=extensions_tmp_dir)
+    Execute(cmd, only_if=only_if_cmd)
+
+    only_if_local = 'ls -d "{local_target_dir}"'.format(local_target_dir=local_target_dir)
+    Execute(("chown", "-R", "root:root", local_target_dir),
+            sudo=True,
+            only_if=only_if_local)
+
+    params.HdfsResource(None,action="execute")
+  return True
+
+def clean_extensions(local_dir):
+  """
+  :param local_dir: The local directory where the extensions are stored.
+  :return: Will return True if successful, otherwise, False.
+  """
+  if os.path.isdir(local_dir):
+    Directory(local_dir,
+              action="delete")
+  return True
+
+def get_config_formatted_value(property_value):
+  return format(property_value.replace("{{", "{").replace("}}", "}"))

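For orientation: the hook above is driven entirely by optional *-site properties and falls back to the defaults shown when they are absent. A sketch of the core-site keys it consults (values illustrative; hbase-site and hive-site use the analogous hbase.* and hive.* keys):

    hadoop.custom-extensions.enabled=true
    hadoop.custom-extensions.services=HIVE,HBASE
    hadoop.custom-extensions.owner=hdfs
    hadoop.custom-extensions.root=/hdp/ext/{{major_stack_version}}/hadoop

Note that get_config_formatted_value() turns the doubled braces back into single ones before formatting, so config values may embed {{...}} placeholders that are resolved at runtime.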
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/hook.py
new file mode 100644
index 0000000..4cb276a
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/hook.py
@@ -0,0 +1,43 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from rack_awareness import create_topology_script_and_mapping
+from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink, setup_unlimited_key_jce_policy
+from custom_extensions import setup_extensions
+
+class BeforeStartHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+
+    setup_hadoop()
+    setup_configs()
+    create_javahome_symlink()
+    create_topology_script_and_mapping()
+    setup_unlimited_key_jce_policy()
+    if params.stack_supports_hadoop_custom_extensions:
+      setup_extensions()
+
+if __name__ == "__main__":
+  BeforeStartHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
new file mode 100644
index 0000000..6c26e01
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
@@ -0,0 +1,380 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version, compare_versions, get_major_version
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions import StackFeature
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+artifact_dir = tmp_dir + "/AMBARI-artifacts"
+
+version_for_stack_feature_checks = get_stack_feature_version(config)
+stack_supports_hadoop_custom_extensions = check_stack_feature(StackFeature.HADOOP_CUSTOM_EXTENSIONS, version_for_stack_feature_checks)
+
+sudo = AMBARI_SUDO_BINARY
+
+# Global flag enabling or disabling the sysprep feature
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+# Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/
+# This is required if tarballs are going to be copied to HDFS, so set to False
+sysprep_skip_copy_fast_jar_hdfs = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)
+
+# Whether to skip setting up the unlimited key JCE policy
+sysprep_skip_setup_jce = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_setup_jce", False)
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+major_stack_version = get_major_version(stack_version_formatted)
+
+dfs_type = default("/commandParams/dfs_type", "")
+hadoop_conf_dir = "/etc/hadoop/conf"
+component_list = default("/localComponents", [])
+
+hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp")
+
+hadoop_metrics2_properties_content = None
+if 'hadoop-metrics2.properties' in config['configurations']:
+  hadoop_metrics2_properties_content = config['configurations']['hadoop-metrics2.properties']['content']
+
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+
+mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+hadoop_home = stack_select.get_hadoop_dir("home")
+create_lib_snappy_symlinks = False
+  
+current_service = config['serviceName']
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+ambari_server_resources_url = default("/hostLevelParams/jdk_location", None)
+if ambari_server_resources_url is not None and ambari_server_resources_url.endswith('/'):
+  ambari_server_resources_url = ambari_server_resources_url[:-1]
+
+# Unlimited key JCE policy params
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+unlimited_key_jce_required = default("/hostLevelParams/unlimited_key_jce_required", False)
+jdk_name = default("/hostLevelParams/jdk_name", None)
+java_home = default("/hostLevelParams/java_home", None)
+java_exec = "{0}/bin/java".format(java_home) if java_home is not None else "/bin/java"
+
+#users and groups
+has_hadoop_env = 'hadoop-env' in config['configurations']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+cluster_name = config["clusterName"]
+set_instanceId = "false"
+if 'cluster-env' in config['configurations'] and \
+    'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
+has_namenode = not len(namenode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+has_hcat_server_host = not len(hcat_server_hosts) == 0
+has_hive_server_host = not len(hive_server_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_metric_collector = not len(ams_collector_hosts) == 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+metric_collector_port = None
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+  pass
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
+# Cluster Zookeeper quorum
+zookeeper_quorum = None
+if has_zk_host:
+  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
+    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
+  else:
+    zookeeper_clientPort = '2181'
+  zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_hosts'])
+  # last port config
+  zookeeper_quorum += ':' + zookeeper_clientPort
+
+#hadoop params
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hbase_tmp_dir = "/tmp/hbase-hbase"
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+oracle_driver_symlink_url = format("{ambari_server_resources_url}/oracle-jdbc-driver.jar")
+mysql_driver_symlink_url = format("{ambari_server_resources_url}/mysql-jdbc-driver.jar")
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
+
+if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
+  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
+else:
+  rca_enabled = False
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#log4j.properties
+
+yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
+
+# Hdfs log4j settings
+hadoop_log_max_backup_size = default('configurations/hdfs-log4j/hadoop_log_max_backup_size', 256)
+hadoop_log_number_of_backup_files = default('configurations/hdfs-log4j/hadoop_log_number_of_backup_files', 10)
+hadoop_security_log_max_backup_size = default('configurations/hdfs-log4j/hadoop_security_log_max_backup_size', 256)
+hadoop_security_log_number_of_backup_files = default('configurations/hdfs-log4j/hadoop_security_log_number_of_backup_files', 20)
+
+# Yarn log4j settings
+yarn_rm_summary_log_max_backup_size = default('configurations/yarn-log4j/yarn_rm_summary_log_max_backup_size', 256)
+yarn_rm_summary_log_number_of_backup_files = default('configurations/yarn-log4j/yarn_rm_summary_log_number_of_backup_files', 20)
+
+#log4j.properties
+if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
+  log4j_props = config['configurations']['hdfs-log4j']['content']
+  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
+    log4j_props += config['configurations']['yarn-log4j']['content']
+else:
+  log4j_props = None
+
+refresh_topology = False
+command_params = config["commandParams"] if "commandParams" in config else None
+if command_params is not None:
+  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+ambari_jce_name = default("/commandParams/ambari_jce_name", None)
+  
+ambari_libs_dir = "/var/lib/ambari-agent/lib"
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+#host info
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+
+#topology files
+net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
+net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
+net_topology_mapping_data_file_name = 'topology_mappings.data'
+net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
+
+#Added logic to create /tmp and /user directory for HCFS stack.  
+has_core_site = 'core-site' in config['configurations']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path()
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+hdfs_site = config['configurations']['hdfs-site']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 0770
+
+
+##### Namenode RPC ports - metrics config section start #####
+
+# Figure out the rpc ports for current namenode
+nn_rpc_client_port = None
+nn_rpc_dn_port = None
+nn_rpc_healthcheck_port = None
+
+namenode_id = None
+namenode_rpc = None
+
+dfs_ha_enabled = False
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
+dfs_ha_namenode_ids_list = []
+other_namenode_id = None
+
+if dfs_ha_namenode_ids:
+ dfs_ha_namenode_ids_list = dfs_ha_namenode_ids.split(",")
+ dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids_list)
+ if dfs_ha_namenode_ids_array_len > 1:
+   dfs_ha_enabled = True
+
+if dfs_ha_enabled:
+ for nn_id in dfs_ha_namenode_ids_list:
+   nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+   if hostname.lower() in nn_host.lower():
+     namenode_id = nn_id
+     namenode_rpc = nn_host
+   pass
+ pass
+else:
+  namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', default_fs)
+
+# if HDFS is not installed in the cluster, then don't try to access namenode_rpc
+if "core-site" in config['configurations'] and namenode_rpc:
+  port_str = namenode_rpc.split(':')[-1].strip()
+  try:
+    nn_rpc_client_port = int(port_str)
+  except ValueError:
+    nn_rpc_client_port = None
+
+if dfs_ha_enabled:
+ dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+ dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+else:
+ dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
+ dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None)
+
+if dfs_service_rpc_address:
+ nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip()
+
+if dfs_lifeline_rpc_address:
+ nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip()
+
+is_nn_client_port_configured = nn_rpc_client_port is not None
+is_nn_dn_port_configured = nn_rpc_dn_port is not None
+is_nn_healthcheck_port_configured = nn_rpc_healthcheck_port is not None
+
+##### end #####
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/rack_awareness.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/rack_awareness.py
new file mode 100644
index 0000000..48158bb
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/rack_awareness.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.core.resources import File
+from resource_management.core.source import StaticFile, Template
+from resource_management.libraries.functions import format
+
+
+def create_topology_mapping():
+  import params
+
+  File(params.net_topology_mapping_data_file_path,
+       content=Template("topology_mappings.data.j2"),
+       owner=params.hdfs_user,
+       group=params.user_group,
+       mode=0644,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script():
+  import params
+
+  File(params.net_topology_script_file_path,
+       content=StaticFile('topology_script.py'),
+       mode=0755,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script_and_mapping():
+  import params
+  if params.has_hadoop_env:
+    create_topology_mapping()
+    create_topology_script()

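For context: these File resources only install the script and its data file; Hadoop picks the script up through the usual core-site rack-awareness property, which in a stock layout would look like the line below (illustrative; the property is wired elsewhere in the stack configuration, not by this hook):

    net.topology.script.file.name=/etc/hadoop/conf/topology_script.py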
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
new file mode 100644
index 0000000..48dc4b0
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,256 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+from resource_management.core.resources.jcepolicyinfo import JcePolicyInfo
+
+from resource_management import *
+
+def setup_hadoop():
+  """
+  Setup hadoop files and directories
+  """
+  import params
+
+  Execute(("setenforce","0"),
+          only_if="test -f /selinux/enforce",
+          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
+          sudo=True,
+  )
+
+  #directories
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    Directory(params.hdfs_log_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group=params.user_group,
+              mode=0775,
+              cd_access='a',
+    )
+    if params.has_namenode:
+      Directory(params.hadoop_pid_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group='root',
+              cd_access='a',
+      )
+    Directory(params.hadoop_tmp_dir,
+              create_parents = True,
+              owner=params.hdfs_user,
+              cd_access='a',
+              )
+  #files
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+      
+    # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS.
+    if params.sysprep_skip_copy_fast_jar_hdfs:
+      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
+    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+      # for source-code of jar goto contrib/fast-hdfs-resource
+      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
+           mode=0644,
+           content=StaticFile("fast-hdfs-resource.jar")
+      )
+      
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
+           owner=tc_owner,
+           content=Template('commons-logging.properties.j2')
+      )
+
+      health_check_template_name = "health_check"
+      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
+           owner=tc_owner,
+           content=Template(health_check_template_name + ".j2")
+      )
+
+      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+      if params.log4j_props is not None:
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+             content=InlineTemplate(params.log4j_props)
+        )
+      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+        )
+
+      if params.hadoop_metrics2_properties_content:
+        File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+             owner=params.hdfs_user,
+             group=params.user_group,
+             content=InlineTemplate(params.hadoop_metrics2_properties_content)
+             )
+      else:
+        File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+             owner=params.hdfs_user,
+             group=params.user_group,
+             content=Template("hadoop-metrics2.properties.j2")
+             )
+
+    if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
+       create_dirs()
+
+    create_microsoft_r_dir()
+
+
+def setup_configs():
+  """
+  Creates configs for the HDFS and MapReduce services.
+  """
+  import params
+
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    if os.path.exists(params.hadoop_conf_dir):
+      File(params.task_log4j_properties_location,
+           content=StaticFile("task-log4j.properties"),
+           mode=0755
+      )
+
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+           owner=params.hdfs_user,
+           group=params.user_group
+      )
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+      File(os.path.join(params.hadoop_conf_dir, 'masters'),
+                owner=params.hdfs_user,
+                group=params.user_group
+      )
+
+def create_javahome_symlink():
+  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
+    Directory("/usr/jdk64/",
+         create_parents = True,
+    )
+    Link("/usr/jdk/jdk1.6.0_31",
+         to="/usr/jdk64/jdk1.6.0_31",
+    )
+
+def create_dirs():
+   import params
+   params.HdfsResource(params.hdfs_tmp_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.hdfs_user,
+                       mode=0777
+   )
+   params.HdfsResource(params.smoke_hdfs_user_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.smoke_user,
+                       mode=params.smoke_hdfs_user_mode
+   )
+   params.HdfsResource(None,
+                      action="execute"
+   )
+
+def create_microsoft_r_dir():
+  import params
+  if 'MICROSOFT_R_NODE_CLIENT' in params.component_list and params.default_fs:
+    directory = '/user/RevoShare'
+    try:
+      params.HdfsResource(directory,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.hdfs_user,
+                          mode=0777)
+      params.HdfsResource(None, action="execute")
+    except Exception as exception:
+      Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
+
+def setup_unlimited_key_jce_policy():
+  """
+  Sets up the unlimited key JCE policy if needed (also sets up the Ambari JCE if Ambari and the stack use different JDKs).
+  """
+  import params
+  __setup_unlimited_key_jce_policy(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name, custom_jce_name = params.jce_policy_zip)
+  if params.ambari_jce_name and params.ambari_jce_name != params.jce_policy_zip:
+    __setup_unlimited_key_jce_policy(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name, custom_jce_name = params.ambari_jce_name)
+
+def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name, custom_jce_name):
+  """
+  Sets up the unlimited key JCE policy if needed.
+
+  The following criteria must be met:
+
+    * The cluster has not been previously prepared (sys preped) - cluster-env/sysprep_skip_setup_jce = False
+    * Ambari is managing the host's JVM - /hostLevelParams/jdk_name is set
+    * Either security is enabled OR a service requires it - /hostLevelParams/unlimited_key_jce_required = True
+    * The unlimited key JCE policy has not already been installed
+
+  If the conditions are met, the following steps are taken to install the unlimited key JCE policy JARs
+
+    1. The unlimited key JCE policy ZIP file is downloaded from the Ambari server and stored in the
+        Ambari agent's temporary directory
+    2. The existing JCE policy JAR files are deleted
+    3. The downloaded ZIP file is unzipped into the proper JCE policy directory
+
+  :return: None
+  """
+  import params
+
+  if params.sysprep_skip_setup_jce:
+    Logger.info("Skipping unlimited key JCE policy check and setup since the host is sys prepped")
+
+  elif not custom_jdk_name:
+    Logger.debug("Skipping unlimited key JCE policy check and setup since the Java VM is not managed by Ambari")
+
+  elif not params.unlimited_key_jce_required:
+    Logger.debug("Skipping unlimited key JCE policy check and setup since it is not required")
+
+  else:
+    jcePolicyInfo = JcePolicyInfo(custom_java_home)
+
+    if jcePolicyInfo.is_unlimited_key_jce_policy():
+      Logger.info("The unlimited key JCE policy is required, and appears to have been installed.")
+
+    elif custom_jce_name is None:
+      raise Fail("The unlimited key JCE policy needs to be installed; however the JCE policy zip is not specified.")
+
+    else:
+      Logger.info("The unlimited key JCE policy is required, and needs to be installed.")
+
+      jce_zip_target = format("{artifact_dir}/{custom_jce_name}")
+      jce_zip_source = format("{ambari_server_resources_url}/{custom_jce_name}")
+      java_security_dir = format("{custom_java_home}/jre/lib/security")
+
+      Logger.debug("Downloading the unlimited key JCE policy files from {0} to {1}.".format(jce_zip_source, jce_zip_target))
+      Directory(params.artifact_dir, create_parents=True)
+      File(jce_zip_target, content=DownloadSource(jce_zip_source))
+
+      Logger.debug("Removing existing JCE policy JAR files: {0}.".format(java_security_dir))
+      File(format("{java_security_dir}/US_export_policy.jar"), action="delete")
+      File(format("{java_security_dir}/local_policy.jar"), action="delete")
+
+      Logger.debug("Unzipping the unlimited key JCE policy files from {0} into {1}.".format(jce_zip_target, java_security_dir))
+      extract_cmd = ("unzip", "-o", "-j", "-q", jce_zip_target, "-d", java_security_dir)
+      Execute(extract_cmd,
+              only_if=format("test -e {java_security_dir} && test -f {jce_zip_target}"),
+              path=['/bin/', '/usr/bin'],
+              sudo=True
+              )

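Independent of this hook, whether the unlimited key JCE policy is active on a given JDK can be checked by hand with jrunscript, which ships with the JDK (a sketch; 2147483647 indicates the unlimited policy is installed):

    "$JAVA_HOME/bin/jrunscript" -e 'print(javax.crypto.Cipher.getMaxAllowedKeyLength("AES"))'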
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stack-hooks/before-START/templates/commons-logging.properties.j2
new file mode 100644
index 0000000..2197ba5
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,43 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stack-hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..1adba80
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
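
The template body is just a newline-separated host list. Rendered standalone it
behaves as below (a sketch using stock jinja2 rather than Ambari's bundled
ambari_jinja2; host names are made up). The include_hosts_list.j2 template
later in this commit follows the same pattern with slave_hosts:

    from jinja2 import Template

    TEMPLATE = "{% for host in hdfs_exclude_file %}\n{{host}}\n{% endfor %}"

    # hdfs_exclude_file holds the DataNodes being decommissioned.
    rendered = Template(TEMPLATE, trim_blocks=True).render(
        hdfs_exclude_file=["dn1.example.com", "dn2.example.com"])
    print(rendered)
    # dn1.example.com
    # dn2.example.com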

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stack-hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100644
index 0000000..2cd9aa8
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,107 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
+supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}
+
+{% if has_metric_collector %}
+
+*.period={{metrics_collection_period}}
+*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+*.sink.timeline.period={{metrics_collection_period}}
+*.sink.timeline.sendInterval={{metrics_report_interval}}000
+*.sink.timeline.slave.host.name={{hostname}}
+*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
+*.sink.timeline.protocol={{metric_collector_protocol}}
+*.sink.timeline.port={{metric_collector_port}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+
+# HTTPS properties
+*.sink.timeline.truststore.path = {{metric_truststore_path}}
+*.sink.timeline.truststore.type = {{metric_truststore_type}}
+*.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
+journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
+
+resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
+
+{% if is_nn_client_port_configured %}
+# Namenode rpc ports customization
+namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
+{% endif %}
+{% if is_nn_dn_port_configured %}
+namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
+{% endif %}
+{% if is_nn_healthcheck_port_configured %}
+namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
+{% endif %}
+
+{% endif %}
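
As the syntax comment at the top notes, every line here follows
[prefix].[sink].[instance].[option]: the prefix names the daemon the setting
applies to, and * is a wildcard matching all daemons. A hedged sketch of how
the timeline sink lines are assembled (sample values stand in for the Jinja
variables):

    # Sample values standing in for the template variables above.
    metrics_collection_period = 60            # seconds between snapshots
    metrics_report_interval = 60              # seconds between sends to AMS
    ams_collector_hosts = "ams1.example.com"  # assumption

    lines = [
        "*.period=%d" % metrics_collection_period,                # all daemons
        "*.sink.timeline.period=%d" % metrics_collection_period,
        # the template's trailing '000' converts seconds to milliseconds
        "*.sink.timeline.sendInterval=%d000" % metrics_report_interval,
        # the 'namenode' prefix scopes the option to the NameNode only
        "namenode.sink.timeline.collector.hosts=%s" % ams_collector_hosts,
    ]
    print("\n".join(lines))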

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stack-hooks/before-START/templates/health_check.j2
new file mode 100644
index 0000000..0a03d17
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/templates/health_check.j2
@@ -0,0 +1,81 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+# Run all checks
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0
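
check_disks flags ext3 filesystems from /etc/fstab that are either not mounted
or remounted read-only. A rough Python equivalent of that logic, assuming the
standard /etc/fstab and /proc/mounts layouts:

    def check_disks():
        """Return (ok, message) for the ext3 mounts listed in /etc/fstab."""
        with open("/etc/fstab") as fstab:
            ext3_mounts = [fields[1] for fields in (ln.split() for ln in fstab)
                           if len(fields) > 2 and fields[2] == "ext3"]
        with open("/proc/mounts") as mounts:
            opts_by_mount = dict((fields[1], fields[3])
                                 for fields in (ln.split() for ln in mounts)
                                 if len(fields) > 3)

        problems = []
        for m in ext3_mounts:
            if m not in opts_by_mount and m != "/mnt":
                problems.append("%s(u)" % m)   # (u)  = unmounted
            elif m in opts_by_mount and opts_by_mount[m].startswith("ro,"):
                problems.append("%s(ro)" % m)  # (ro) = mounted read-only
        return (not problems, " ".join(problems) or "disks ok")

    ok, msg = check_disks()
    print(("OK: %s" if ok else "ERROR %s") % msg)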

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stack-hooks/before-START/templates/include_hosts_list.j2
new file mode 100644
index 0000000..4a9e713
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/templates/topology_mappings.data.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/templates/topology_mappings.data.j2 b/ambari-server/src/main/resources/stack-hooks/before-START/templates/topology_mappings.data.j2
new file mode 100644
index 0000000..15034d6
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/templates/topology_mappings.data.j2
@@ -0,0 +1,24 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+[network_topology]
+{% for host in all_hosts %}
+{% if host in slave_hosts %}
+{{host}}={{all_racks[loop.index-1]}}
+{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
+{% endif %}
+{% endfor %}
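
Rendered, this produces the INI-style table Hadoop's rack-awareness mapping
reads: one line per slave host plus one per its IPv4 address. A small render
sketch with made-up hosts and racks (using stock jinja2):

    from jinja2 import Template

    TOPOLOGY = (
        "[network_topology]\n"
        "{% for host in all_hosts %}{% if host in slave_hosts %}"
        "{{host}}={{all_racks[loop.index-1]}}\n"
        "{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}\n"
        "{% endif %}{% endfor %}")

    print(Template(TOPOLOGY).render(
        all_hosts=["nn.example.com", "dn1.example.com"],
        slave_hosts=["dn1.example.com"],
        all_racks=["/rack-a", "/rack-b"],        # parallel to all_hosts
        all_ipv4_ips=["10.0.0.1", "10.0.0.2"]))  # parallel to all_hosts
    # [network_topology]
    # dn1.example.com=/rack-b
    # 10.0.0.2=/rack-b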

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
deleted file mode 100644
index 8bae9e6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.hook import Hook
-from shared_initialization import link_configs
-from shared_initialization import setup_config
-from shared_initialization import setup_stack_symlinks
-
-class AfterInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_stack_symlinks(self.stroutfile)
-    setup_config()
-
-    link_configs(self.stroutfile)
-
-if __name__ == "__main__":
-  AfterInstallHook().execute()
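
Every hook script in this commit, old location or new, follows the same
contract: subclass Hook, implement hook(env), and let execute() drive it. A
minimal skeleton of that pattern (the body is illustrative, not the deleted
implementation):

    from resource_management.libraries.script.hook import Hook

    class ExampleHook(Hook):

      def hook(self, env):
        import params            # hook-scoped module of config values
        env.set_params(params)   # make params resolvable by format()/Template()
        # ... resource declarations (File, Directory, Execute, ...) ...

    if __name__ == "__main__":
      ExampleHook().execute()    # parses the agent command JSON, runs hook()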

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
deleted file mode 100644
index bf9d79b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,108 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.script import Script
-from resource_management.libraries.script.script import get_config_lock_file
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.version import format_stack_version, get_major_version
-from string import lower
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-major_stack_version = get_major_version(stack_version_formatted)
-
-# service name
-service_name = config['serviceName']
-
-# logsearch configuration
-logsearch_logfeeder_conf = "/etc/ambari-logsearch-logfeeder/conf"
-
-agent_cache_dir = config['hostLevelParams']['agentCacheDir']
-service_package_folder = config['commandParams']['service_package_folder']
-logsearch_service_name = service_name.lower().replace("_", "-")
-logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json"
-logsearch_config_file_path = agent_cache_dir + "/" + service_package_folder + "/templates/" + logsearch_config_file_name + ".j2"
-logsearch_config_file_exists = os.path.isfile(logsearch_config_file_path)
-
-# default hadoop params
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-
-mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-
-versioned_stack_root = '/usr/hdp/current'
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#users and groups
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['cluster-env']['user_group']
-
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-has_namenode = not len(namenode_host) == 0
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-
-link_configs_lock_file = get_config_lock_file()
-stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
-
-upgrade_suspended = default("/roleParams/upgrade_suspended", False)
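
Most assignments in this params module either index the config dict directly
(failing fast when a key is missing) or go through default(path, fallback),
which degrades to a fallback value instead of raising; format_jvm_option layers
a JVM-sizing default on top. A hedged sketch of those two lookups as used here
(they only work inside a command context where Script.get_config() is set):

    from resource_management.libraries.functions import default
    from resource_management.libraries.functions import format_jvm_option

    # Returns the configured value, or the fallback when the key is absent
    # from the command JSON -- no KeyError either way.
    mapred_pid_dir_prefix = default(
        "/configurations/mapred-env/mapred_pid_dir_prefix",
        "/var/run/hadoop-mapreduce")

    # Same idea for JVM sizing flags, falling back to "128m".
    namenode_opt_permsize = format_jvm_option(
        "/configurations/hadoop-env/namenode_opt_permsize", "128m")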

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
deleted file mode 100644
index 67c3ba8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,132 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-import ambari_simplejson as json
-from ambari_jinja2 import Environment as JinjaEnvironment
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Directory, File
-from resource_management.core.source import InlineTemplate, Template
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.script import Script
-
-
-def setup_stack_symlinks(struct_out_file):
-  """
-  Invokes "<stack-selector-tool> set all" with a fully-qualified, "normalized" version calculated
-  from the stack version, such as "2.3". This should always be called after a component has been
-  installed to ensure that all HDP symlink pointers are correct. The stack upgrade logic does not
-  interact with this, since upgrades run via a custom command and will not trigger this hook.
-  :return:
-  """
-  import params
-  if params.upgrade_suspended:
-    Logger.warning("Skipping running stack-selector-tool because there is a suspended upgrade")
-    return
-
-  if params.host_sys_prepped:
-    Logger.warning("Skipping running stack-selector-tool because this is a sys_prepped host. This may cause symlink pointers not to be created for HDP components installed later on top of an already sys_prepped host")
-    return
-
-  # get the packages which the stack-select tool should be used on
-  stack_packages = stack_select.get_packages(stack_select.PACKAGE_SCOPE_INSTALL)
-  if stack_packages is None:
-    return
-
-  json_version = load_version(struct_out_file)
-
-  if not json_version:
-    Logger.info("There is no advertised version for this component stored in {0}".format(struct_out_file))
-    return
-
-  # On parallel command execution this should be executed by a single process at a time.
-  with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-    for package in stack_packages:
-      stack_select.select(package, json_version)
-
-
-def setup_config():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-
-  is_hadoop_conf_dir_present = False
-  if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
-    is_hadoop_conf_dir_present = True
-  else:
-    Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
-
-  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
-    # create core-site only if the hadoop config directory exists
-    XmlConfig("core-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['core-site'],
-              configuration_attributes=params.config['configuration_attributes']['core-site'],
-              owner=params.hdfs_user,
-              group=params.user_group,
-              only_if=format("ls {hadoop_conf_dir}"))
-
-  Directory(params.logsearch_logfeeder_conf,
-            mode=0755,
-            cd_access='a',
-            create_parents=True
-            )
-
-  if params.logsearch_config_file_exists:
-    File(format("{logsearch_logfeeder_conf}/" + params.logsearch_config_file_name),
-         content=Template(params.logsearch_config_file_path,extra_imports=[default])
-         )
-  else:
-    Logger.warning('No logsearch configuration exists at ' + params.logsearch_config_file_path)
-
-
-def load_version(struct_out_file):
-  """
-  Load the version from a file. Kept as a separate method for easier testing.
-  """
-  try:
-    with open(struct_out_file, 'r') as fp:
-      json_info = json.load(fp)
-
-    return json_info['version']
-  except (IOError, KeyError, TypeError):
-    return None
-
-
-def link_configs(struct_out_file):
-  """
-  Links configs; runs only on a fresh install of HDP-2.3 or higher.
-  """
-  import params
-
-  json_version = load_version(struct_out_file)
-
-  if not json_version:
-    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
-    return
-
-  # On parallel command execution this should be executed by a single process at a time.
-  with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-    for k, v in conf_select.get_package_dirs().iteritems():
-      conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file
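
Both public entry points in the deleted module share one idiom: read the
version advertised in the structured-output file, then mutate symlinks under an
fcntl-based lock so parallel agent commands cannot race. A condensed sketch of
that idiom (the file paths are assumptions):

    import json  # the module itself uses ambari_simplejson

    from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock

    def load_version(struct_out_file):
      # Structured output written during install; may be absent or incomplete.
      try:
        with open(struct_out_file) as fp:
          return json.load(fp)["version"]
      except (IOError, KeyError, TypeError):
        return None

    version = load_version("/tmp/structured-out.json")  # path is an assumption
    if version:
      # Serialize the symlink switch across concurrently running commands.
      with FcntlBasedProcessLock("/tmp/stack_select_lock", enabled=True,
                                 skip_fcntl_failures=True):
        pass  # stack_select.select(package, version) per package, in the real hook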

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
deleted file mode 100644
index a6b8b77..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-newUid=$3
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   cut -d: -f3 /etc/passwd | grep -qx $i
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-if [ -z $2 ]; then
-  test $(id -u ${username} 2>/dev/null)
-  if [ $? -ne 1 ]; then
-   newUid=`id -u ${username}`
-  else
-   find_available_uid
-  fi
-  echo $newUid
-  exit 0
-else
-  find_available_uid
-fi
-
-if [ $newUid -eq 0 ]
-then
-  echo "Failed to find Uid between 1000 and 2000"
-  exit 1
-fi
-
-set -e
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
-exit 0
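
find_available_uid scans 1001-2000 for a UID not already taken in /etc/passwd.
A rough Python equivalent that matches the UID field exactly rather than
searching the whole passwd line:

    def find_available_uid(lo=1001, hi=2000):
        """Return the first unused UID in [lo, hi], or 0 if none is free."""
        with open("/etc/passwd") as f:
            used = set(int(line.split(":")[2])
                       for line in f if line.count(":") >= 2)
        for uid in range(lo, hi + 1):
            if uid not in used:
                return uid
        return 0  # the shell script treats 0 as "no free UID found"

    print(find_available_uid())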

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/hook.py
deleted file mode 100644
index c34be0b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/hook.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from shared_initialization import *
-
-class BeforeAnyHook(Hook):
-
-  def hook(self, env):
-    import params
-    env.set_params(params)
-
-    setup_users()
-    if params.has_namenode or params.dfs_type == 'HCFS':
-      setup_hadoop_env()
-    setup_java()
-
-if __name__ == "__main__":
-  BeforeAnyHook().execute()
-