Posted to commits@ambari.apache.org by al...@apache.org on 2017/06/28 00:24:48 UTC

[50/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/hook.py
new file mode 100755
index 0000000..ad7144c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/hook.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.hook import Hook
+from shared_initialization import link_configs
+from shared_initialization import setup_config
+from shared_initialization import setup_iop_install_directory
+from resource_management.libraries.script import Script
+
+class AfterInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_iop_install_directory()
+    setup_config()
+
+    link_configs(self.stroutfile)
+
+if __name__ == "__main__":
+  AfterInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py
new file mode 100755
index 0000000..d3332db
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py
@@ -0,0 +1,88 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.version import format_stack_version
+
+from resource_management.core.system import System
+from ambari_commons.os_check import OSCheck
+
+
+config = Script.get_config()
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+
+# default hadoop params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+
+# IOP 4.0+ params
+if Script.is_stack_greater_or_equal("4.0"):
+  mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
+  # not supported in IOP 4.0+
+  hadoop_conf_empty_dir = None
+
+versioned_iop_root = '/usr/iop/current'
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#users and groups
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['cluster-env']['user_group']
+
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+has_namenode = not len(namenode_host) == 0
+
+if has_namenode:
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/shared_initialization.py
new file mode 100755
index 0000000..0d68abc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,89 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import shutil
+
+import ambari_simplejson as json
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.core.shell import as_sudo
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.script import Script
+
+def setup_iop_install_directory():
+  # This is the name of the marker file.
+  SELECT_ALL_PERFORMED_MARKER = "/var/lib/ambari-agent/data/iop-select-set-all.performed"
+
+  import params
+  if params.iop_stack_version != "" and compare_versions(params.iop_stack_version, '4.0') >= 0:
+    Execute(as_sudo(['touch', SELECT_ALL_PERFORMED_MARKER]) + ' ; ' +
+                   format('{sudo} /usr/bin/iop-select set all `ambari-python-wrap /usr/bin/iop-select versions | grep ^{stack_version_unformatted} | tail -1`'),
+            only_if=format('ls -d /usr/iop/{stack_version_unformatted}*'),   # If any IOP version is installed
+            not_if=format("test -f {SELECT_ALL_PERFORMED_MARKER}")           # Do that only once (otherwise we break rolling upgrade logic)
+    )
+
+def setup_config():
+  import params
+  if params.has_namenode:
+    # create core-site only if the hadoop config directory exists
+    XmlConfig("core-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['core-site'],
+              configuration_attributes=params.config['configuration_attributes']['core-site'],
+              owner=params.hdfs_user,
+              group=params.user_group,
+              only_if=format("ls {hadoop_conf_dir}"))
+
+
+def load_version(struct_out_file):
+  """
+  Load the version from the given file. Kept as a separate method for easier testing.
+  """
+  json_version = None
+  try:
+    if os.path.exists(struct_out_file):
+      with open(struct_out_file, 'r') as fp:
+        json_info = json.load(fp)
+        json_version = json_info['version']
+  except:
+    pass
+
+  return json_version
+  
+
+def link_configs(struct_out_file):
+  """
+  Links configs, only on a fresh install of BigInsights-4.1 and higher
+  """
+
+  if not Script.is_stack_greater_or_equal("4.1"):
+    Logger.info("Can only link configs for BigInsights-4.1 and higher.")
+    return
+
+  json_version = load_version(struct_out_file)
+
+  if not json_version:
+    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
+    return
+
+  for k, v in conf_select.get_package_dirs().iteritems():
+    conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/files/changeToSecureUid.sh
new file mode 100755
index 0000000..af632b2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/files/changeToSecureUid.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+username=$1
+directories=$2
+newUid=$3
+
+function find_available_uid() {
+ for ((i=1001; i<=2000; i++))
+ do
+   grep -q $i /etc/passwd
+   if [ "$?" -ne 0 ]
+   then
+    newUid=$i
+    break
+   fi
+ done
+}
+
+if [ -z $2 ]; then
+  test $(id -u ${username} 2>/dev/null)
+  if [ $? -ne 1 ]; then
+   newUid=`id -u ${username}`
+  else
+   find_available_uid 
+  fi
+  echo $newUid
+  exit 0
+fi
+
+if [ $newUid -eq 0 ]
+then
+  echo "Failed to find Uid between 1000 and 2000"
+  exit 1
+fi
+
+
+set -e
+dir_array=($(echo $directories | sed 's/,/\n/g'))
+old_uid=$(id -u $username)
+sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
+echo "Changing uid of $username from $old_uid to $newUid"
+echo "Changing directory permisions for ${dir_array[@]}"
+$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
+exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/hook.py
new file mode 100755
index 0000000..1926f48
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/hook.py
@@ -0,0 +1,36 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from shared_initialization import *
+
+class BeforeAnyHook(Hook):
+
+  def hook(self, env):
+    import params
+    env.set_params(params)
+    
+    setup_users()
+    if params.has_namenode:
+      setup_hadoop_env()
+    setup_java()
+
+if __name__ == "__main__":
+  BeforeAnyHook().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py
new file mode 100755
index 0000000..5ffd28c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py
@@ -0,0 +1,226 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import collections
+import re
+import os
+
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.version import compare_versions
+from ambari_commons.os_check import OSCheck
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None)
+java_home = config['hostLevelParams']['java_home']
+java_version = int(config['hostLevelParams']['java_version'])
+jdk_location = config['hostLevelParams']['jdk_location']
+
+sudo = AMBARI_SUDO_BINARY
+
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+
+restart_type = default("/commandParams/restart_type", "")
+version = default("/commandParams/version", None)
+# Handle upgrade and downgrade
+if (restart_type.lower() == "rolling_upgrade" or restart_type.lower() == "nonrolling_upgrade") and version:
+  iop_stack_version = format_stack_version(version)
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+
+# Some datanode settings
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+secure_dn_ports_are_in_use = False
+
+def get_port(address):
+  """
+  Extracts port from the address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+  if m is not None:
+    return int(m.group(2))
+  else:
+    return None
+
+def is_secure_port(port):
+  """
+  Returns True if the port is a privileged (root-owned) port on *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False
+
+# hadoop default params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+
+# upgrades would cause these directories to have a version instead of "current"
+# which would cause a lot of problems when writing out hadoop-env.sh; instead
+# force the use of "current" in the hook
+hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
+
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+hadoop_secure_dn_user = hdfs_user
+hadoop_dir = "/etc/hadoop"
+versioned_iop_root = '/usr/iop/current'
+hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
+datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
+is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
+
+
+if Script.is_stack_greater_or_equal("4.0"):
+  mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
+  
+  # not supported in IOP 4.0+
+  hadoop_conf_empty_dir = None
+
+  if not security_enabled:
+    hadoop_secure_dn_user = '""'
+  else:
+    dfs_dn_port = get_port(dfs_dn_addr)
+    dfs_dn_http_port = get_port(dfs_dn_http_addr)
+    dfs_dn_https_port = get_port(dfs_dn_https_addr)
+    # Avoid a situation where the datanode cannot be started as a plain user because it would bind to root-owned ports
+    if dfs_http_policy == "HTTPS_ONLY":
+      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
+    elif dfs_http_policy == "HTTP_AND_HTTPS":
+      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
+    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
+    if secure_dn_ports_are_in_use:
+      hadoop_secure_dn_user = hdfs_user
+    else:
+      hadoop_secure_dn_user = '""'
+
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = None  #config['configurations']['tez-env']["tez_user"]
+oozie_user = config['configurations']['oozie-env']["oozie_user"]
+falcon_user = config['configurations']['falcon-env']["falcon_user"]
+ranger_user = config['configurations']['ranger-env']["ranger_user"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+
+has_namenode = not len(namenode_host) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_tez = False #'tez-site' in config['configurations']
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+has_falcon_server_hosts = not len(falcon_server_hosts) == 0
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+
+if has_namenode:
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+ranger_group = config['configurations']['ranger-env']['ranger_group']
+dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
+
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)
+
+user_to_groups_dict = collections.defaultdict(lambda:[user_group])
+user_to_groups_dict[smoke_user] = [proxyuser_group]
+if has_ganglia_server:
+  user_to_groups_dict[gmond_user] = [gmond_user]
+  user_to_groups_dict[gmetad_user] = [gmetad_user]
+if has_tez:
+  user_to_groups_dict[tez_user] = [proxyuser_group]
+if has_oozie_server:
+  user_to_groups_dict[oozie_user] = [proxyuser_group]
+if has_falcon_server_hosts:
+  user_to_groups_dict[falcon_user] = [proxyuser_group]  
+if has_ranger_admin:
+  user_to_groups_dict[ranger_user] = [ranger_group]
+
+user_to_gid_dict = collections.defaultdict(lambda:user_group)
+
+user_list = json.loads(config['hostLevelParams']['user_list'])
+group_list = json.loads(config['hostLevelParams']['group_list'])
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py
new file mode 100755
index 0000000..1c98fb8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py
@@ -0,0 +1,242 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import getpass
+from copy import copy
+from resource_management.libraries.functions.version import compare_versions
+from resource_management import *
+
+
+def setup_users():
+  """
+  Creates users before cluster installation
+  """
+  import params
+
+  if not params.host_sys_prepped and not params.ignore_groupsusers_create:
+    for group in params.group_list:
+      Group(group,
+      )
+
+
+    for user in params.user_list:
+      if params.override_uid == "true":
+        User(user,
+            uid = get_uid(user),
+            gid = params.user_to_gid_dict[user],
+            groups = params.user_to_groups_dict[user],
+        )
+      else:
+        User(user,
+            gid = params.user_to_gid_dict[user],
+            groups = params.user_to_groups_dict[user],
+        )
+
+    if params.override_uid == "true":
+      set_uid(params.smoke_user, params.smoke_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for smoke user as override_uid is not enabled')
+  else:
+    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
+    pass
+
+
+  if params.has_hbase_masters:
+    Directory (params.hbase_tmp_dir,
+               owner = params.hbase_user,
+               mode=0775,
+               create_parents = True,
+               cd_access="a",
+    )
+    if not params.host_sys_prepped and params.override_uid == "true":
+      set_uid(params.hbase_user, params.hbase_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for hbase user as host is sys prepped or override_uid is not enabled')
+      pass
+
+  if not params.host_sys_prepped:
+    if params.has_namenode:
+      create_dfs_cluster_admins()
+  else:
+    Logger.info('Skipping setting dfs cluster admin as host is sys prepped')
+
+
+def create_dfs_cluster_admins():
+  """
+  dfs.cluster.administrators supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
+  """
+  import params
+
+  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
+
+  User(params.hdfs_user,
+    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
+    ignore_failures = params.ignore_groupsusers_create
+  )
+def create_users_and_groups(user_and_groups):
+
+  import params
+
+  parts = re.split('\s', user_and_groups)
+  if len(parts) == 1:
+    parts.append("")
+
+  users_list = parts[0].split(",") if parts[0] else []
+  groups_list = parts[1].split(",") if parts[1] else []
+
+  if users_list:
+    User(users_list,
+         ignore_failures = params.ignore_groupsusers_create
+    )
+
+  if groups_list:
+    Group(copy(groups_list),
+          ignore_failures = params.ignore_groupsusers_create
+    )
+  return groups_list
+    
+def set_uid(user, user_dirs):
+  """
+  user_dirs - comma-separated list of directories
+  """
+  import params
+
+  File(format("{tmp_dir}/changeUid.sh"),
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
+  uid = get_uid(user)
+  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {uid}"),
+          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
+
+def get_uid(user):
+  import params
+  import commands
+  user_str = str(user) + "_uid"
+  service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
+  
+  if service_env and params.config['configurations'][service_env[0]][user_str]:
+    service_env_str = str(service_env[0])
+    uid = params.config['configurations'][service_env_str][user_str]
+    if len(service_env) > 1:
+      Logger.warning("Multiple values found for %s, using %s"  % (user_str, uid))
+    return uid 
+  else:
+    if user == params.smoke_user:
+      return 0
+    File(format("{tmp_dir}/changeUid.sh"),
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+    ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
+    newUid=commands.getoutput(format("{tmp_dir}/changeUid.sh {user}"))
+    return newUid
+    
+def setup_hadoop_env():
+  import params
+  if params.has_namenode:
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+
+    Directory(params.hadoop_dir, mode=0755)
+
+    # IOP < 4.0 used a conf -> conf.empty symlink for /etc/hadoop/
+    if Script.is_stack_less_than("4.0"):
+      Directory(params.hadoop_conf_empty_dir,
+              create_parents = True,
+              owner = 'root',
+              group = params.user_group
+      )
+      Link(params.hadoop_conf_dir,
+         to=params.hadoop_conf_empty_dir,
+         not_if=format("ls {hadoop_conf_dir}")
+      )
+      
+    # write out hadoop-env.sh, but only if the directory exists
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
+         owner=tc_owner, group=params.user_group,
+         content=InlineTemplate(params.hadoop_env_sh_template)
+      )
+      
+    # Create tmp dir for java.io.tmpdir
+    # Handle a situation when /tmp is set to noexec
+    Directory(params.hadoop_java_io_tmpdir,
+              owner=params.hdfs_user,
+              group=params.user_group,
+              mode=0777
+    )
+  
+def setup_java():
+  """
+  Installs the JDK using specific params that come from ambari-server
+  """
+  import params
+
+  java_exec = format("{java_home}/bin/java")
+
+  if not os.path.isfile(java_exec):
+
+    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
+    java_dir = os.path.dirname(params.java_home)
+    tmp_java_dir = format("{tmp_dir}/jdk")
+
+    if not params.jdk_name:
+      return
+
+    Directory(params.artifact_dir,
+              create_parents = True,
+              )
+
+    File(jdk_curl_target,
+         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
+         not_if = format("test -f {jdk_curl_target}")
+    )
+
+    if params.jdk_name.endswith(".bin"):
+      chmod_cmd = ("chmod", "+x", jdk_curl_target)
+      install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+    elif params.jdk_name.endswith(".gz"):
+      chmod_cmd = ("chmod","a+x", java_dir)
+      install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+
+    Directory(java_dir
+    )
+
+    Execute(chmod_cmd,
+            sudo = True,
+            )
+
+    Execute(install_cmd,
+            )
+
+    File(format("{java_home}/bin/java"),
+         mode=0755,
+         cd_access="a",
+         )
+
+    Execute(("chgrp","-R", params.user_group, params.java_home),
+            sudo = True,
+            )
+    Execute(("chown","-R", getpass.getuser(), params.java_home),
+            sudo = True,
+            )

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/hook.py
new file mode 100755
index 0000000..ce17776
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/hook.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+from repo_initialization import *
+
+class BeforeInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+    
+    install_repos()
+    install_packages()
+
+if __name__ == "__main__":
+  BeforeInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/params.py
new file mode 100755
index 0000000..18ce5af
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/params.py
@@ -0,0 +1,111 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.core.system import System
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import default, format
+
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = None #config['configurations']['tez-env']["tez_user"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+
+# repo templates
+repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
+repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
+falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
+
+has_sqoop_client = 'sqoop-env' in config['configurations']
+has_namenode = not len(namenode_host) == 0
+has_hs = not len(hs_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_storm_server = not len(storm_server_hosts) == 0
+has_falcon_server = not len(falcon_host) == 0
+has_tez = False #'tez-site' in config['configurations']
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_location = config['hostLevelParams']['jdk_location']
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/repo_initialization.py
new file mode 100755
index 0000000..1b0f5c6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/repo_initialization.py
@@ -0,0 +1,90 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+from ambari_commons.os_check import OSCheck
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+
+# components_list = repoName + postfix
+_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
+REPO_FILE_NAME_PREFIX = 'IOP-'
+STACK_TO_ROOT_FOLDER = {"IOP": "/usr/iop", "BIGINSIGHTS":"/usr/iop"}
+
+def _alter_repo(action, repo_string, repo_template):
+  """
+  @param action: "delete" or "create"
+  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
+  """
+  repo_dicts = json.loads(repo_string)
+
+  if not isinstance(repo_dicts, list):
+    repo_dicts = [repo_dicts]
+
+  for repo in repo_dicts:
+    if not 'baseUrl' in repo:
+      repo['baseUrl'] = None
+    if not 'mirrorsList' in repo:
+      repo['mirrorsList'] = None
+    
+    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
+    
+    Repository(repo['repoId'],
+               action = action,
+               base_url = repo['baseUrl'],
+               mirror_list = repo['mirrorsList'],
+               repo_file_name = repo['repoName'],
+               repo_template = repo_template,
+               components = ubuntu_components, # ubuntu specific
+    )
+
+    if action == "create":
+      #Attempt to clean cache against the given repo
+      repo_id=repo['repoId']
+      print "Clean cache against " + repo_id + "; file:" + repo['repoName']
+      current_repo_ids = []
+      current_repo_files = set() 
+      
+      if OSCheck.is_ubuntu_family():
+        current_repo_files.add("base")
+        current_repo_files.add(repo['repoName'])
+      elif OSCheck.is_suse_family():
+        current_repo_ids.append("base")
+        current_repo_ids.append(repo_id)
+      else:  
+        current_repo_ids.append(repo_id)
+      
+      Repository(repo_id,
+                 action = "clearcache",
+                 base_url = repo['baseUrl'],
+                 mirror_list = repo['mirrorsList'],
+                 repo_file_name = repo['repoName'],
+                 repo_template = repo_template,
+                 components = ubuntu_components,  # ubuntu specific
+                 use_repos=list(current_repo_files) if OSCheck.is_ubuntu_family() else current_repo_ids,
+                 skip_repos=["*"] if OSCheck.is_redhat_family() else []
+      )      
+
+def install_repos():
+  import params
+  if params.host_sys_prepped:
+    return
+
+  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
+  _alter_repo("create", params.repo_info, template)
+  if params.service_repo_info:
+    _alter_repo("create", params.service_repo_info, template)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/shared_initialization.py
new file mode 100755
index 0000000..08748c9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,34 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+
+def install_packages():
+  import params
+  if params.host_sys_prepped:
+    return
+
+  packages = ['unzip', 'curl']
+  if params.iop_stack_version != "" and compare_versions(params.iop_stack_version, '4.0') >= 0:
+    packages.append('iop-select')
+  Package(packages)
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-RESTART/scripts/hook.py
new file mode 100755
index 0000000..14b9d99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-RESTART/scripts/hook.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class BeforeRestartHook(Hook):
+
+  def hook(self, env):
+    self.run_custom_hook('before-START')
+
+if __name__ == "__main__":
+  BeforeRestartHook().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/checkForFormat.sh
new file mode 100755
index 0000000..68aa96d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/checkForFormat.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export bin_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
+  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
+    (( EXIT_CODE = $EXIT_CODE | $? ))
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/fast-hdfs-resource.jar
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/fast-hdfs-resource.jar b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/fast-hdfs-resource.jar
new file mode 100755
index 0000000..6ea0873
Binary files /dev/null and b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/fast-hdfs-resource.jar differ

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/task-log4j.properties
new file mode 100755
index 0000000..7e12962
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/task-log4j.properties
@@ -0,0 +1,134 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+ 
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/topology_script.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/topology_script.py
new file mode 100755
index 0000000..0f7a55c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/topology_script.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import sys, os
+import ConfigParser
+
+
+DEFAULT_RACK = "/default-rack"
+DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
+SECTION_NAME = "network_topology"
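+# topology_mappings.data is an INI-style file read by ConfigParser, e.g.:
+#   [network_topology]
+#   host1.example.com=/rack01
+#   192.168.1.10=/rack01
+# Hadoop invokes this script with one or more hostnames/IPs and expects the
+# matching rack(s) on stdout, space separated; unknown hosts get /default-rack.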
+
+class TopologyScript():
+
+  def load_rack_map(self):
+    try:
+      # the rack map contains both hostname->rack and IP->rack mappings
+      mappings = ConfigParser.ConfigParser()
+      mappings.read(DATA_FILE_NAME)
+      return dict(mappings.items(SECTION_NAME))
+    except ConfigParser.NoSectionError:
+      return {}
+
+  def get_racks(self, rack_map, args):
+    if len(args) == 1:
+      return DEFAULT_RACK
+    else:
+      return " ".join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]])
+
+  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
+    #try looking up by hostname
+    rack = rack_map.get(hostname_or_ip)
+    if rack is not None:
+      return rack
+    #try looking up by ip
+    rack = rack_map.get(self.extract_ip(hostname_or_ip))
+    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
+    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
+
+  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
+  def extract_ip(self, container_string):
+    return container_string.split("/")[0].split(":")[0]
+
+  def execute(self, args):
+    rack_map = self.load_rack_map()
+    rack = self.get_racks(rack_map, args)
+    print rack
+
+if __name__ == "__main__":
+  TopologyScript().execute(sys.argv)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/hook.py
new file mode 100755
index 0000000..c4854ae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/hook.py
@@ -0,0 +1,40 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from rack_awareness import create_topology_script_and_mapping
+from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
+
+
+class BeforeStartHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+
+    setup_hadoop()
+    setup_configs()
+    create_javahome_symlink()
+    create_topology_script_and_mapping()	
+
+if __name__ == "__main__":
+  BeforeStartHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py
new file mode 100755
index 0000000..5c84a05
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py
@@ -0,0 +1,211 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.script.script import Script
+
+config = Script.get_config()
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+
+# hadoop default params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_home = '/usr'
+create_lib_snappy_symlinks = True
+
+# IOP 4.0+ params
+if Script.is_stack_greater_or_equal("4.0"):
+  mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
+  hadoop_home = stack_select.get_hadoop_dir("home")
+  create_lib_snappy_symlinks = False
+
+
+current_service = config['serviceName']
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#users and groups
+has_hadoop_env = 'hadoop-env' in config['configurations']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+
+has_namenode = not len(namenode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_metric_collector = not len(ams_collector_hosts) == 0
+
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_host' in config['configurations']['cluster-env']:
+    metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
+  else:
+    metric_collector_host = ams_collector_hosts[0]
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+  else:
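+    # derive the port from timeline.metrics.service.webapp.address (host:port); fall back to 6188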
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  pass
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 60)
+
+#hadoop params
+
+if has_namenode:
+  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+  
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+
+
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+ambari_server_resources = config['hostLevelParams']['jdk_location']
+oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
+mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
+
+if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
+  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
+else:
+  rca_enabled = False
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#log4j.properties
+
+yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
+
+#log4j.properties
+if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
+  log4j_props = config['configurations']['hdfs-log4j']['content']
+  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
+    log4j_props += config['configurations']['yarn-log4j']['content']
+else:
+  log4j_props = None
+
+refresh_topology = False
+command_params = config["commandParams"] if "commandParams" in config else None
+if command_params is not None:
+  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+  
+ambari_libs_dir = "/var/lib/ambari-agent/lib"
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+#host info
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+
+#topology files
+net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
+net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
+net_topology_mapping_data_file_name = 'topology_mappings.data'
+net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/rack_awareness.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/rack_awareness.py
new file mode 100755
index 0000000..9b28d33
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/rack_awareness.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from resource_management.core.resources import File, Directory
+from resource_management.core.source import StaticFile, Template
+from resource_management.libraries.functions import format
+
+import os
+
+def create_topology_mapping():
+  import params
+  
+  path=params.net_topology_mapping_data_file_path 
+  parent_dir=os.path.dirname(path) 
+  # only create the parent directory and set its permission if it does not exist
+  if not os.path.exists(parent_dir): 
+    Directory(parent_dir, 
+              create_parents = True,
+              owner=params.hdfs_user, 
+              group=params.user_group) 
+
+  # placing the mappings file in the same folder where the topology script is located
+  File(path,
+       content=Template("topology_mappings.data.j2"),
+       owner=params.hdfs_user,
+       group=params.user_group,
+       # if there are no hadoop components (no topology script dir), don't create the mappings file
+       only_if=format("test -d {net_topology_script_dir}"),
+  )
+
+def create_topology_script():
+  import params
+
+  path=params.net_topology_script_file_path
+  parent_dir=os.path.dirname(path) 
+  # only create the parent directory and set its permission if it does not exist 
+  if not os.path.exists(parent_dir): 
+    Directory(parent_dir, 
+              create_parents = True,
+              owner=params.hdfs_user, 
+              group=params.user_group) 
+
+  # installing the topology script to the specified location
+  File(path,
+       content=StaticFile('topology_script.py'),
+       mode=0755,
+       only_if=format("test -d {net_topology_script_dir}"),
+  )
+
+  
+def create_topology_script_and_mapping():
+  import params
+  if params.has_hadoop_env:
+    create_topology_mapping()
+    create_topology_script()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/shared_initialization.py
new file mode 100755
index 0000000..3922231
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,152 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+
+from resource_management import *
+
+def setup_hadoop():
+  """
+  Set up Hadoop files and directories.
+  """
+  import params
+
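+  # switch SELinux to permissive mode when it is present and currently enforcing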
+  Execute(("setenforce","0"),
+          only_if="test -f /selinux/enforce",
+          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
+          sudo=True,
+  )
+
+  #directories
+  if params.has_namenode:
+    Directory(params.hdfs_log_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group=params.user_group,
+              mode=0775,
+              cd_access='a',
+    )
+    Directory(params.hadoop_pid_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group='root',
+              cd_access='a',
+    )
+    Directory(params.hadoop_tmp_dir,
+              create_parents = True,
+              owner=params.hdfs_user,
+              cd_access='a',
+              )
+  #files
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+      
+    # if WebHDFS is not enabled we need this jar to create hadoop folders.
+    if not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+      # for the jar's source code, see contrib/fast-hdfs-resource
+      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
+           mode=0644,
+           content=StaticFile("fast-hdfs-resource.jar")
+      )
+      
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
+           mode=0644,
+           group=params.user_group,
+           owner=tc_owner,
+           content=Template('commons-logging.properties.j2')
+      )
+
+      health_check_template_name = "health_check"
+      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
+           owner=tc_owner,
+           content=Template(health_check_template_name + ".j2")
+      )
+
+      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+      if (params.log4j_props != None):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+             content=params.log4j_props
+        )
+      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+        )
+
+      File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+           owner=params.hdfs_user,
+           content=Template("hadoop-metrics2.properties.j2")
+      )
+
+
+def setup_configs():
+  """
+  Creates configs for the HDFS and MapReduce services.
+  """
+  import params
+
+  if params.has_namenode:
+    if os.path.exists(params.hadoop_conf_dir):
+      File(params.task_log4j_properties_location,
+           content=StaticFile("task-log4j.properties"),
+           mode=0755
+      )
+
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+           owner=params.hdfs_user,
+           group=params.user_group
+      )
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+      File(os.path.join(params.hadoop_conf_dir, 'masters'),
+                owner=params.hdfs_user,
+                group=params.user_group
+      )
+
+  generate_include_file()
+
+
+def generate_include_file():
+  import params
+
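+  # write the dfs.hosts include file from the slave host list when one is configured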
+  if params.has_namenode and params.dfs_hosts and params.has_slaves:
+    include_hosts_list = params.slave_hosts
+    File(params.dfs_hosts,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+def create_javahome_symlink():
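+  # create a /usr/jdk/jdk1.6.0_31 symlink pointing at the JDK installed under
+  # /usr/jdk64, for components that expect the legacy path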
+  if os.path.exists("/usr/jdk64/jdk1.6.0_31") and not os.path.exists("/usr/jdk/jdk1.6.0_31"):
+    Directory("/usr/jdk/",
+         create_parents = True,
+    )
+    Link("/usr/jdk/jdk1.6.0_31",
+         to="/usr/jdk64/jdk1.6.0_31",
+    )
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/commons-logging.properties.j2
new file mode 100755
index 0000000..2197ba5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,43 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100755
index 0000000..1adba80
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}