Posted to commits@ambari.apache.org by al...@apache.org on 2017/06/28 00:24:11 UTC

[13/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager.py
new file mode 100755
index 0000000..d185b7e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager.py
@@ -0,0 +1,144 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import nodemanager_upgrade
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+
+from yarn import yarn
+from service import service
+
+class Nodemanager(Script):
+
+  def get_component_name(self):
+    return "hadoop-yarn-nodemanager"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name="nodemanager")
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing NodeManager Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-yarn-nodemanager", params.version)
+      #Execute(format("stack-select set hadoop-yarn-nodemanager {version}"))
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('nodemanager',action='start')
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing NodeManager Stack Upgrade post-restart")
+    import params
+    env.set_params(params)
+
+    nodemanager_upgrade.post_upgrade_check()
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    service('nodemanager',action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.nodemanager_pid_file)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
+                           "yarn.acl.enable": "true"}
+      props_empty_check = ["yarn.nodemanager.principal",
+                           "yarn.nodemanager.keytab",
+                           "yarn.nodemanager.webapp.spnego-principal",
+                           "yarn.nodemanager.webapp.spnego-keytab-file"]
+
+      props_read_check = ["yarn.nodemanager.keytab",
+                          "yarn.nodemanager.webapp.spnego-keytab-file"]
+      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
+                                           props_read_check)
+
+      yarn_expectations = {}
+      yarn_expectations.update(yarn_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'yarn-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, yarn_site_props)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'yarn-site' not in security_params
+               or 'yarn.nodemanager.keytab' not in security_params['yarn-site']
+               or 'yarn.nodemanager.principal' not in security_params['yarn-site']) \
+            or 'yarn.nodemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
+            or 'yarn.nodemanager.webapp.spnego-principal' not in security_params['yarn-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set properly."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.nodemanager.keytab'],
+                                security_params['yarn-site']['yarn.nodemanager.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
+                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+if __name__ == "__main__":
+  Nodemanager().execute()

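The pre_upgrade_restart method above only switches the conf_select/stack_select pointers when the restart carries an upgrade version at or above the BigInsights 4.0.0.0 line. A minimal standalone sketch of that version gate, assuming simple dotted numeric versions (the real compare_versions/format_stack_version helpers in resource_management also normalize build suffixes):

    # Hypothetical standalone sketch of the version gate; not the resource_management implementation.
    def _to_tuple(version):
        # "4.1.0.0" -> (4, 1, 0, 0); non-numeric segments are ignored in this sketch
        return tuple(int(p) for p in version.split('.') if p.isdigit())

    def should_select_new_binaries(version, minimum="4.0.0.0"):
        """True when the restart carries a version at or above the BigInsights 4.0 line."""
        return version is not None and _to_tuple(version) >= _to_tuple(minimum)

    print(should_select_new_binaries("4.1.0.0"))  # True  -> conf_select.select / stack_select.select run
    print(should_select_new_binaries(None))       # False -> pre_upgrade_restart is a no-op
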
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager_upgrade.py
new file mode 100755
index 0000000..54e0fae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager_upgrade.py
@@ -0,0 +1,74 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import subprocess
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.core import shell
+from resource_management.libraries.functions.decorator import retry
+
+
+def post_upgrade_check():
+  '''
+  Checks that the NodeManager has rejoined the cluster.
+  This function will obtain the Kerberos ticket if security is enabled.
+  :return:
+  '''
+  import params
+
+  Logger.info('NodeManager executing "yarn node -list -states=RUNNING" to verify the node has rejoined the cluster...')
+  if params.security_enabled and params.nodemanager_kinit_cmd:
+    Execute(params.nodemanager_kinit_cmd, user = params.yarn_user)
+
+  _check_nodemanager_startup()
+
+
+@retry(times=12, sleep_time=10, err_class=Fail)
+def _check_nodemanager_startup():
+  '''
+  Checks that a NodeManager is in a RUNNING state in the cluster via
+  "yarn node -list -states=RUNNING" command. Once the NodeManager is found to be
+  alive this method will return, otherwise it will raise a Fail(...) and retry
+  automatically.
+  :return:
+  '''
+  import params
+
+  command = 'yarn node -list -states=RUNNING'
+
+  try:
+    # 'su - yarn -c "yarn node -status c6401.ambari.apache.org:45454"'
+    return_code, yarn_output = shell.call(command, user=params.hdfs_user)
+  except Exception:
+    raise Fail('Unable to determine if the NodeManager has started after upgrade.')
+
+  if return_code == 0:
+    hostname = params.hostname.lower()
+    nodemanager_address = params.nm_address.lower()
+    yarn_output = yarn_output.lower()
+
+    if hostname in yarn_output or nodemanager_address in yarn_output:
+      Logger.info('NodeManager with ID {0} has rejoined the cluster.'.format(nodemanager_address))
+      return
+    else:
+      raise Fail('NodeManager with ID {0} was not found in the list of running NodeManagers'.format(nodemanager_address))
+
+  raise Fail('Unable to determine if the NodeManager has started after upgrade (result code {0})'.format(str(return_code)))

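_check_nodemanager_startup above is wrapped by @retry(times=12, sleep_time=10, err_class=Fail), so the upgrade waits up to roughly two minutes for the NodeManager to reappear in "yarn node -list -states=RUNNING". A minimal sketch of those retry semantics using plain exceptions (the actual decorator lives in resource_management.libraries.functions.decorator and may differ in detail):

    import functools
    import time

    def retry(times=12, sleep_time=10, err_class=Exception):
        """Call the wrapped function until it stops raising err_class, sleeping between
        attempts; re-raise the last error once all attempts are exhausted."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                for attempt in range(times):
                    try:
                        return func(*args, **kwargs)
                    except err_class:
                        if attempt == times - 1:
                            raise
                        time.sleep(sleep_time)
            return wrapper
        return decorator
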
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/params.py
new file mode 100755
index 0000000..957e2dd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/params.py
@@ -0,0 +1,224 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.libraries.functions.default import default
+from resource_management import *
+from resource_management.libraries.functions import conf_select, stack_select
+import status_params
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+# This is expected to be of the form #.#.#.#
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+hostname = config['hostname']
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+#hadoop params
+yarn_role_root = "hadoop-yarn-client"
+mapred_role_root = "hadoop-mapreduce-client"
+
+command_role = default("/role", "")
+if command_role == "APP_TIMELINE_SERVER":
+  yarn_role_root = "hadoop-yarn-timelineserver"
+elif command_role == "HISTORYSERVER":
+  mapred_role_root = "hadoop-mapreduce-historyserver"
+elif command_role == "MAPREDUCE2_CLIENT":
+  mapred_role_root = "hadoop-mapreduce-client"
+elif command_role == "NODEMANAGER":
+  yarn_role_root = "hadoop-yarn-nodemanager"
+elif command_role == "RESOURCEMANAGER":
+  yarn_role_root = "hadoop-yarn-resourcemanager"
+elif command_role == "YARN_CLIENT":
+  yarn_role_root = "hadoop-yarn-client"
+
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+number_of_nm = len(nm_hosts)
+
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_mapred2_jar_location = format("/usr/iop/current/{mapred_role_root}")
+mapred_bin = format("/usr/iop/current/{mapred_role_root}/sbin")
+hadoop_yarn_home = format("/usr/iop/current/{yarn_role_root}")
+yarn_bin = format("/usr/iop/current/{yarn_role_root}/sbin")
+yarn_container_bin = format("/usr/iop/current/{yarn_role_root}/bin")
+
+limits_conf_dir = "/etc/security/limits.d"
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir + os.pathsep + yarn_container_bin
+
+ulimit_cmd = "ulimit -c unlimited;"
+
+mapred_user = status_params.mapred_user
+yarn_user = status_params.yarn_user
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
+kinit_path_local = functions.get_kinit_path()
+rm_hosts = config['clusterHostInfo']['rm_host']
+rm_host = rm_hosts[0]
+rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
+rm_https_port = "8090"
+
+default_exclude_path=os.path.join(hadoop_conf_dir, 'yarn.exclude')
+# TODO UPGRADE default, update site during upgrade
+rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path",default_exclude_path)
+
+java64_home = config['hostLevelParams']['java_home']
+hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
+
+yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
+resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize']
+nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize']
+apptimelineserver_heapsize = default("/configurations/yarn-env/apptimelineserver_heapsize", 1024)
+ats_leveldb_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-timeline-store.path']
+yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
+yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
+mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
+mapred_log_dir_prefix = config['configurations']['mapred-env']['mapred_log_dir_prefix']
+mapred_env_sh_template = config['configurations']['mapred-env']['content']
+yarn_env_sh_template = config['configurations']['yarn-env']['content']
+
+if len(rm_hosts) > 1:
+  additional_rm_host = rm_hosts[1]
+  rm_webui_address = format("{rm_host}:{rm_port},{additional_rm_host}:{rm_port}")
+  rm_webui_https_address = format("{rm_host}:{rm_https_port},{additional_rm_host}:{rm_https_port}")
+else:
+  rm_webui_address = format("{rm_host}:{rm_port}")
+  rm_webui_https_address = format("{rm_host}:{rm_https_port}")
+
+nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
+hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
+nm_address = config['configurations']['yarn-site']['yarn.nodemanager.address']  # still contains 0.0.0.0
+if hostname and nm_address and nm_address.startswith("0.0.0.0:"):
+  nm_address = nm_address.replace("0.0.0.0", hostname)
+
+nm_local_dirs = config['configurations']['yarn-site']['yarn.nodemanager.local-dirs']
+nm_log_dirs = config['configurations']['yarn-site']['yarn.nodemanager.log-dirs']
+
+distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
+hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
+
+yarn_pid_dir = status_params.yarn_pid_dir
+mapred_pid_dir = status_params.mapred_pid_dir
+
+mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
+yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
+mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
+yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+#exclude file
+default_exclude_path=os.path.join(hadoop_conf_dir, 'yarn.exclude')
+exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
+exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path",default_exclude_path)
+
+ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
+has_ats = not len(ats_host) == 0
+
+# default kinit commands
+rm_kinit_cmd = ""
+yarn_timelineservice_kinit_cmd = ""
+nodemanager_kinit_cmd = ""
+
+if security_enabled:
+  _rm_principal_name = config['configurations']['yarn-site']['yarn.resourcemanager.principal']
+  _rm_principal_name = _rm_principal_name.replace('_HOST',hostname.lower())
+  _rm_keytab = config['configurations']['yarn-site']['yarn.resourcemanager.keytab']
+  rm_kinit_cmd = format("{kinit_path_local} -kt {_rm_keytab} {_rm_principal_name};")
+
+  if has_ats:
+    _yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal']
+    _yarn_timelineservice_principal_name = _yarn_timelineservice_principal_name.replace('_HOST', hostname.lower())
+    _yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab']
+    yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {_yarn_timelineservice_keytab} {_yarn_timelineservice_principal_name};")
+
+  if 'yarn.nodemanager.principal' in config['configurations']['yarn-site']:
+    _nodemanager_principal_name = default('/configurations/yarn-site/yarn.nodemanager.principal', None)
+    if _nodemanager_principal_name:
+      _nodemanager_principal_name = _nodemanager_principal_name.replace('_HOST', hostname.lower())
+
+    _nodemanager_keytab = config['configurations']['yarn-site']['yarn.nodemanager.keytab']
+    nodemanager_kinit_cmd = format("{kinit_path_local} -kt {_nodemanager_keytab} {_nodemanager_principal_name};")
+
+else:
+  rm_kinit_cmd = ""
+  yarn_timelineservice_kinit_cmd = ""
+
+yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']
+yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']
+mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']
+mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']
+jobhistory_heapsize = default("/configurations/mapred-env/jobhistory_heapsize", "900")
+jhs_leveldb_state_store_dir = default('/configurations/mapred-site/mapreduce.jobhistory.recovery.store.leveldb.path', "/hadoop/mapreduce/jhs")
+
+#for create_hdfs_directory
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+ )
+update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#taskcontroller.cfg
+
+mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+min_user_id = config['configurations']['yarn-env']['min_user_id']
+
+# Node labels
+node_labels_dir = default("/configurations/yarn-site/yarn.node-labels.fs-store.root-dir", None)

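params.py above binds the cluster-wide HDFS arguments (user, keytab, conf dir, and so on) into HdfsResource with functools.partial, so service scripts only pass the per-path arguments. A sketch of the same pattern with a stand-in class (HdfsResource itself is an Ambari resource and is not reproduced here):

    import functools

    class FakeHdfsResource(object):
        """Stand-in for resource_management's HdfsResource, used only to illustrate the partial."""
        def __init__(self, path, user=None, action=None, type=None, **common_kwargs):
            print("HdfsResource(%r) as user=%r action=%r type=%r" % (path, user, action, type))

    HdfsResource = functools.partial(FakeHdfsResource, user="hdfs", security_enabled=False)

    # Call sites now look like the ones in yarn.py: only the per-directory pieces vary.
    HdfsResource("/mapred", type="directory", action="create_on_execute")
    HdfsResource(None, action="execute")
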
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/resourcemanager.py
new file mode 100755
index 0000000..8e350b9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/resourcemanager.py
@@ -0,0 +1,179 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+
+from yarn import yarn
+from service import service
+
+
+class Resourcemanager(Script):
+
+  def get_component_name(self):
+    return "hadoop-yarn-resourcemanager"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    yarn(name='resourcemanager')
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing ResourceManager Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-yarn-resourcemanager", params.version)
+      #Execute(format("stack-select set hadoop-yarn-resourcemanager {version}"))
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('resourcemanager',
+            action='start'
+    )
+
+  def stop(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+
+    service('resourcemanager',
+            action='stop'
+    )
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.resourcemanager_pid_file)
+    pass
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
+                           "yarn.acl.enable": "true"}
+      props_empty_check = ["yarn.resourcemanager.principal",
+                           "yarn.resourcemanager.keytab",
+                           "yarn.resourcemanager.webapp.spnego-principal",
+                           "yarn.resourcemanager.webapp.spnego-keytab-file"]
+
+      props_read_check = ["yarn.resourcemanager.keytab",
+                          "yarn.resourcemanager.webapp.spnego-keytab-file"]
+      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
+                                           props_read_check)
+
+      yarn_expectations = {}
+      yarn_expectations.update(yarn_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'yarn-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, yarn_site_props)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'yarn-site' not in security_params
+               or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
+               or 'yarn.resourcemanager.principal' not in security_params['yarn-site']) \
+            or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
+            or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set properly."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.resourcemanager.keytab'],
+                                security_params['yarn-site']['yarn.resourcemanager.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
+                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def refreshqueues(self, env):
+    import params
+
+    self.configure(env)
+    env.set_params(params)
+
+    service('resourcemanager',
+            action='refreshQueues'
+    )
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    rm_kinit_cmd = params.rm_kinit_cmd
+    yarn_user = params.yarn_user
+    conf_dir = params.hadoop_conf_dir
+    user_group = params.user_group
+
+    yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=yarn_user,
+         group=user_group
+    )
+
+    if params.update_exclude_file_only == False:
+      Execute(yarn_refresh_cmd,
+            environment= {'PATH' : params.execute_path },
+            user=yarn_user)
+      pass
+    pass
+
+
+if __name__ == "__main__":
+  Resourcemanager().execute()

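decommission() above rewrites the YARN exclude file from exclude_hosts_list.j2 and, unless update_exclude_file_only is set, tells the ResourceManager to re-read it. A rough sketch of what gets written and executed, using made-up host names and paths and assuming an unsecured cluster (so the kinit prefix is empty):

    # Hypothetical values standing in for params.exclude_hosts / params.hadoop_conf_dir.
    exclude_hosts = ["nm1.example.com", "nm2.example.com"]
    conf_dir = "/etc/hadoop/conf"

    def render_exclude_file(hosts):
        # exclude_hosts_list.j2 simply emits one decommissioned host per line
        return "\n".join(hosts) + "\n"

    rm_kinit_cmd = ""  # empty when security_enabled is False
    yarn_refresh_cmd = ("%s yarn --config %s rmadmin -refreshNodes" % (rm_kinit_cmd, conf_dir)).strip()

    print(render_exclude_file(exclude_hosts))
    print(yarn_refresh_cmd)  # yarn --config /etc/hadoop/conf rmadmin -refreshNodes
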
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service.py
new file mode 100755
index 0000000..1002094
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service.py
@@ -0,0 +1,76 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+
+def service(componentName, action='start', serviceName='yarn'):
+
+  import params
+
+  if serviceName == 'mapreduce' and componentName == 'historyserver':
+    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
+    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
+    usr = params.mapred_user
+  else:
+    daemon = format("{yarn_bin}/yarn-daemon.sh")
+    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{componentName}.pid")
+    usr = params.yarn_user
+
+  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}")
+
+  if action == 'start':
+    daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}")
+    check_process = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
+
+    # Remove the pid file if its corresponding process is not running.
+    File(pid_file,
+         action="delete",
+         not_if=check_process)
+
+    # Attempt to start the process. Internally, this is skipped if the process is already running.
+    Execute(daemon_cmd,
+            user=usr,
+            not_if=check_process
+    )
+
+    # Ensure that the process with the expected PID exists.
+    Execute(check_process,
+            user=usr,
+            not_if=check_process,
+            initial_wait=5
+    )
+
+  elif action == 'stop':
+    daemon_cmd = format("{cmd} stop {componentName}")
+    Execute(daemon_cmd,
+            user=usr)
+
+    File(pid_file,
+         action="delete")
+
+  elif action == 'refreshQueues':
+    rm_kinit_cmd = params.rm_kinit_cmd
+    refresh_cmd = format("{rm_kinit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {yarn_container_bin}/yarn rmadmin -refreshQueues")
+
+    Execute(refresh_cmd,
+            user=usr,
+    )

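service() above guards both the stale-pid-file cleanup and the daemon start with a shell one-liner that checks the pid file exists and its recorded process is alive. The same check expressed as standalone Python, for reference (the script keeps the shell form so it can be used directly as a not_if guard):

    import os

    def pid_file_process_alive(pid_file):
        """Roughly: ls pid_file >/dev/null 2>&1 && ps -p $(cat pid_file) >/dev/null 2>&1."""
        if not os.path.isfile(pid_file):
            return False
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 only checks that the process exists
            return True
        except (ValueError, OSError):
            return False

    # On start, a pid file whose process is gone is deleted and the daemon is launched;
    # if the check already passes, the Execute of the start command is skipped (not_if).
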
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service_check.py
new file mode 100755
index 0000000..7f42f9e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service_check.py
@@ -0,0 +1,89 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+import re
+from resource_management.libraries.functions.version import compare_versions
+from resource_management import *
+import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and provides the same function set.
+
+CURL_CONNECTION_TIMEOUT = '5'
+
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    if params.stack_version != "" and compare_versions(params.stack_version, '4.0') >= 0:
+      path_to_distributed_shell_jar = "/usr/iop/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar"
+    else:
+      path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
+
+    yarn_distributed_shell_check_cmd = format("yarn org.apache.hadoop.yarn.applications.distributedshell.Client "
+                                              "-shell_command ls -num_containers {number_of_nm} -jar {path_to_distributed_shell_jar}")
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+      smoke_cmd = format("{kinit_cmd} {yarn_distributed_shell_check_cmd}")
+    else:
+      smoke_cmd = yarn_distributed_shell_check_cmd
+
+    return_code, out = shell.checked_call(smoke_cmd,
+                                          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                          user=params.smokeuser,
+                                          )
+
+    m = re.search("appTrackingUrl=(.*),\s", out)
+    app_url = m.group(1)
+
+    splitted_app_url = str(app_url).split('/')
+
+    for item in splitted_app_url:
+      if "application" in item:
+        application_name = item
+
+    json_response_received = False
+    for rm_host in params.rm_hosts:
+      info_app_url = "http://" + rm_host + ":" + params.rm_port + "/ws/v1/cluster/apps/" + application_name
+
+      get_app_info_cmd = "curl --negotiate -u : -sL --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
+
+      return_code, stdout = shell.checked_call(get_app_info_cmd,
+                                            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                            user=params.smokeuser,
+                                            )
+
+      try:
+        json_response = json.loads(stdout)
+        app_state = json_response['app']['state']
+        app_final_status = json_response['app']['finalStatus']
+        json_response_received = True
+      except (ValueError, KeyError):
+        # This ResourceManager did not return usable application info; try the next one.
+        continue
+
+      if app_state != "FINISHED" or app_final_status != "SUCCEEDED":
+        raise Exception("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
+
+    if not json_response_received:
+      raise Exception("Could not get json response from YARN API")
+
+if __name__ == "__main__":
+  ServiceCheck().execute()

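The service check above submits a distributed-shell application and then queries each ResourceManager's /ws/v1/cluster/apps/<application_id> endpoint until one of them confirms the run. A small sketch of that response handling with a made-up response body (field names follow the YARN REST API; the script uses ambari_simplejson, which behaves like the standard json module here):

    import json

    # Hypothetical curl output for a finished application.
    stdout = ('{"app": {"id": "application_1498600000000_0001", '
              '"state": "FINISHED", "finalStatus": "SUCCEEDED"}}')

    app = json.loads(stdout)["app"]
    if app["state"] != "FINISHED" or app["finalStatus"] != "SUCCEEDED":
        raise Exception("Application %s state/status is not valid. Should be FINISHED/SUCCEEDED." % app["id"])
    print("service check passed for %s" % app["id"])
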
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/status_params.py
new file mode 100755
index 0000000..3b7f5ba
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/status_params.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
+mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
+yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
+mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
+
+resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
+nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
+yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
+yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
+mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
+
+# Security related/required params
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hostname = config['hostname']
+kinit_path_local = functions.get_kinit_path()
+security_enabled = config['configurations']['cluster-env']['security_enabled']

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn.py
new file mode 100755
index 0000000..1264b87
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn.py
@@ -0,0 +1,277 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import sys
+import os
+
+
+def yarn(name = None):
+  import params
+
+
+  if name in ["nodemanager","historyserver"]:
+    if params.yarn_log_aggregation_enabled:
+      params.HdfsResource(params.yarn_nm_app_log_dir,
+                           action="create_on_execute",
+                           type="directory",
+                           owner=params.yarn_user,
+                           group=params.user_group,
+                           mode=0777,
+                           recursive_chmod=True
+      )
+    params.HdfsResource("/mapred",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.mapred_user
+    )
+    params.HdfsResource("/mapred/system",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hdfs_user
+    )
+    params.HdfsResource(params.mapreduce_jobhistory_intermediate_done_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.mapred_user,
+                         group=params.user_group,
+                         mode=0777
+    )
+
+    params.HdfsResource(params.mapreduce_jobhistory_done_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.mapred_user,
+                         group=params.user_group,
+                         mode=0777
+    )
+    params.HdfsResource(None, action="execute")
+    Directory(params.jhs_leveldb_state_store_dir,
+              owner=params.mapred_user,
+              group=params.user_group,
+              create_parents=True,
+              cd_access="a",
+              )
+
+  if name == "nodemanager":
+    Directory(params.nm_local_dirs.split(',') + params.nm_log_dirs.split(','),
+              owner=params.yarn_user,
+              group=params.user_group,
+              create_parents=True,
+              cd_access="a",
+              ignore_failures=True,
+              mode=0775
+              )
+
+    Execute(('chown', '-R', params.yarn_user, params.nm_local_dirs),
+            only_if=format("test -d {nm_local_dirs}"),
+            sudo=True)
+
+
+    if params.security_enabled:
+      smokeuser_directories = [os.path.join(dir, 'usercache', params.smokeuser)
+                               for dir in params.nm_local_dirs.split(',')]
+      for directory in smokeuser_directories:
+        Execute(('chown', '-R', params.smokeuser, directory),
+                only_if=format("test -d {directory}"),
+                sudo=True,
+        )
+  Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
+            owner=params.yarn_user,
+            group=params.user_group,
+            create_parents=True,
+            cd_access = 'a',
+  )
+
+  Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
+            owner=params.mapred_user,
+            group=params.user_group,
+            create_parents=True,
+            cd_access = 'a',
+  )
+  Directory([params.yarn_log_dir_prefix],
+            owner=params.yarn_user,
+            create_parents=True,
+            ignore_failures=True,
+            cd_access = 'a',
+  )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            configuration_attributes=params.config['configuration_attributes']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("yarn-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['yarn-site'],
+            configuration_attributes=params.config['configuration_attributes']['yarn-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['capacity-scheduler'],
+            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  if name == 'resourcemanager':
+    File(params.yarn_job_summary_log,
+       owner=params.yarn_user,
+       group=params.user_group
+    )
+    if params.node_labels_dir:
+      params.HdfsResource(params.node_labels_dir,
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.yarn_user,
+                           group=params.user_group,
+                           mode=0700
+      )
+      params.HdfsResource(None, action="execute")
+  elif name == 'apptimelineserver':
+    Directory(params.ats_leveldb_dir,
+       owner=params.yarn_user,
+       group=params.user_group,
+       create_parents=True,
+       cd_access="a",
+    )
+
+  File(params.rm_nodes_exclude_path,
+       owner=params.yarn_user,
+       group=params.user_group
+  )
+
+  File(format("{limits_conf_dir}/yarn.conf"),
+       mode=0644,
+       content=Template('yarn.conf.j2')
+  )
+
+  File(format("{limits_conf_dir}/mapreduce.conf"),
+       mode=0644,
+       content=Template('mapreduce.conf.j2')
+  )
+
+  File(format("{hadoop_conf_dir}/yarn-env.sh"),
+       owner=params.yarn_user,
+       group=params.user_group,
+       mode=0755,
+       content=InlineTemplate(params.yarn_env_sh_template)
+  )
+
+  container_executor = format("{yarn_container_bin}/container-executor")
+  File(container_executor,
+       group=params.yarn_executor_container_group,
+       mode=06050
+  )
+
+  File(format("{hadoop_conf_dir}/container-executor.cfg"),
+       group=params.user_group,
+       mode=0644,
+       content=Template('container-executor.cfg.j2')
+  )
+
+
+  if params.security_enabled:
+    tc_mode = 0644
+    tc_owner = "root"
+  else:
+    tc_mode = None
+    tc_owner = params.hdfs_user
+
+  File(format("{hadoop_conf_dir}/mapred-env.sh"),
+       owner=tc_owner,
+       content=InlineTemplate(params.mapred_env_sh_template)
+  )
+
+  if params.security_enabled:
+    File(os.path.join(params.hadoop_bin, "task-controller"),
+         owner="root",
+         group=params.mapred_tt_group,
+         mode=06050
+    )
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner = tc_owner,
+         mode = tc_mode,
+         group = params.mapred_tt_group,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  else:
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner=tc_owner,
+         content=Template("taskcontroller.cfg.j2")
+    )
+
+  if "mapred-site" in params.config['configurations']:
+    XmlConfig("mapred-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['mapred-site'],
+              configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+              owner=params.mapred_user,
+              group=params.user_group
+    )
+
+  if "capacity-scheduler" in params.config['configurations']:
+    XmlConfig("capacity-scheduler.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations'][
+                'capacity-scheduler'],
+              configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml')):
+    File(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  if os.path.exists(
+    os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  if os.path.exists(
+    os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )

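yarn() above lays down core-site.xml, mapred-site.xml, yarn-site.xml and capacity-scheduler.xml through XmlConfig, which serializes a configuration dict into a Hadoop-style property file. A rough sketch of that output shape (the real resource also honors configuration_attributes such as final flags, which are omitted here):

    def to_hadoop_xml(properties):
        """Render a dict of configuration keys/values as Hadoop-style configuration XML."""
        lines = ['<?xml version="1.0"?>', '<configuration>']
        for name, value in sorted(properties.items()):
            lines.append('  <property>')
            lines.append('    <name>%s</name>' % name)
            lines.append('    <value>%s</value>' % value)
            lines.append('  </property>')
        lines.append('</configuration>')
        return "\n".join(lines)

    # Example with two yarn-site properties referenced elsewhere in this commit.
    print(to_hadoop_xml({
        "yarn.nodemanager.address": "0.0.0.0:45454",
        "yarn.log-aggregation-enable": "true",
    }))
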
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn_client.py
new file mode 100755
index 0000000..0af5ccb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn_client.py
@@ -0,0 +1,56 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+
+from yarn import yarn
+
+class YarnClient(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def get_component_name(self):
+    return "hadoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+      #Execute(format("stack-select set hadoop-client {version}"))
+
+if __name__ == "__main__":
+  YarnClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/container-executor.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/container-executor.cfg.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/container-executor.cfg.j2
new file mode 100755
index 0000000..9f38f183
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/container-executor.cfg.j2
@@ -0,0 +1,40 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+yarn.nodemanager.local-dirs={{nm_local_dirs}}
+yarn.nodemanager.log-dirs={{nm_log_dirs}}
+yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
+banned.users=hdfs,yarn,mapred,bin
+min.user.id=200

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/exclude_hosts_list.j2
new file mode 100755
index 0000000..c7ce416
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in exclude_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file
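
exclude_hosts_list.j2 simply writes one line per entry in exclude_hosts; the resulting file is what YARN typically consumes as its node exclude list (yarn.resourcemanager.nodes.exclude-path) when NodeManagers are decommissioned. A short sketch of the expansion with hypothetical hosts:

# Sketch: expand the exclude-hosts template; the host names are hypothetical.
# Exact blank-line handling depends on the renderer's whitespace settings.
from jinja2 import Template

exclude_template = "{% for host in exclude_hosts %}\n{{host}}\n{% endfor %}"
print(Template(exclude_template).render(
    exclude_hosts=["node3.example.com", "node7.example.com"]))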

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/mapreduce.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/mapreduce.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/mapreduce.conf.j2
new file mode 100755
index 0000000..b996645
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/mapreduce.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{mapred_user}}   - nofile 32768
+{{mapred_user}}   - nproc  65536
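
mapreduce.conf.j2 (and yarn.conf.j2 further down) follow the /etc/security/limits.d format: account, limit type ("-" covers both soft and hard), item, value. They raise the open-file and process limits for the MapReduce and YARN service users. A rendering sketch with an assumed user name:

# Sketch: render mapreduce.conf.j2 with an assumed service user.
from jinja2 import Template

limits_template = ("{{mapred_user}}   - nofile 32768\n"
                   "{{mapred_user}}   - nproc  65536\n")
# "mapred" is an assumption; the real value comes from the MapReduce env config.
print(Template(limits_template).render(mapred_user="mapred"))
# Expected output:
# mapred   - nofile 32768
# mapred   - nproc  65536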

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/taskcontroller.cfg.j2
new file mode 100755
index 0000000..3d5f4f2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/taskcontroller.cfg.j2
@@ -0,0 +1,38 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+mapred.local.dir={{mapred_local_dir}}
+mapreduce.tasktracker.group={{mapred_tt_group}}
+hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/yarn.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/yarn.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/yarn.conf.j2
new file mode 100755
index 0000000..3bd7a45
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/yarn.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{yarn_user}}   - nofile 32768
+{{yarn_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/alerts.json
new file mode 100755
index 0000000..027d7fa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/alerts.json
@@ -0,0 +1,58 @@
+{
+  "ZOOKEEPER": {
+    "service": [
+      {
+        "name": "zookeeper_server_process_percent",
+        "label": "Percent ZooKeeper Servers Available",
+        "description": "This alert is triggered if the number of down ZooKeeper servers in the cluster is greater than the configured critical threshold. It aggregates the results of ZooKeeper process checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "zookeeper_server_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.35
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.70
+            }
+          }
+        }
+      }
+    ],
+    "ZOOKEEPER_SERVER": [
+      {
+        "name": "zookeeper_server_process",
+        "label": "ZooKeeper Server Process",
+        "description": "This host-level alert is triggered if the ZooKeeper server process cannot be determined to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{zoo.cfg/clientPort}}",
+          "default_port": 2181,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
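
In the aggregate alert above, the warning and critical "value" fields are fractions of affected ZooKeeper server processes (0.35 and 0.70), while in the PORT alert they are response-time thresholds in seconds. Below is a rough sketch of one plausible reading of the aggregate thresholds; the authoritative comparison logic lives in Ambari's AGGREGATE alert source, and the >= comparison here is an assumption:

# Sketch: classify an aggregated ZooKeeper alert from the fractions above.
# The comparison direction (>=) is an illustrative assumption.
def classify(affected, total, warning=0.35, critical=0.70):
    ratio = affected / float(total) if total else 0.0
    if ratio >= critical:
        return "CRITICAL"
    if ratio >= warning:
        return "WARNING"
    return "OK"

for affected, total in [(0, 3), (1, 3), (2, 3), (3, 3)]:
    print("affected: [{1}], total: [{0}] -> {2}".format(
        total, affected, classify(affected, total)))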

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zoo.cfg.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zoo.cfg.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zoo.cfg.xml
new file mode 100755
index 0000000..275cc6a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zoo.cfg.xml
@@ -0,0 +1,84 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>tickTime</name>
+    <display-name>Length of single Tick</display-name>
+    <value>2000</value>
+    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+      <unit>ms</unit>
+    </value-attributes>
+  </property>
+  <property>
+    <name>initLimit</name>
+    <display-name>Ticks to allow for sync at Init</display-name>
+    <value>10</value>
+    <description>Number of ticks to allow followers to connect and sync to a leader during initialization.</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>syncLimit</name>
+    <display-name>Ticks to allow for sync at Runtime</display-name>
+    <value>5</value>
+    <description>Number of ticks a follower may lag behind the leader at runtime before it is dropped.</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>clientPort</name>
+    <display-name>Port for running ZK Server</display-name>
+    <value>2181</value>
+    <description>Port for running ZK Server.</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>dataDir</name>
+    <display-name>ZooKeeper directory</display-name>
+    <value>/hadoop/zookeeper</value>
+    <description>Data directory for ZooKeeper.</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>autopurge.snapRetainCount</name>
+    <value>30</value>
+    <description>The ZooKeeper purge feature retains the autopurge.snapRetainCount most recent snapshots and the corresponding transaction logs in the dataDir and dataLogDir respectively, and deletes the rest.</description>
+  </property>
+  <property>
+    <name>autopurge.purgeInterval</name>
+    <value>24</value>
+    <description>The time interval in hours at which the purge task is triggered. Set to a positive integer (1 and above) to enable auto purging.</description>
+  </property>
+</configuration>
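
Each property above becomes a key=value line in the zoo.cfg that the ZooKeeper scripts write out; the server.N ensemble entries are added separately from the list of ZOOKEEPER_SERVER hosts. A sketch of the file these defaults produce, with hypothetical ensemble hosts and the conventional 2888/3888 peer ports:

# Sketch: assemble a zoo.cfg from the default property values above.
# The ensemble host names are hypothetical.
zoo_cfg = {
    "tickTime": 2000,
    "initLimit": 10,
    "syncLimit": 5,
    "clientPort": 2181,
    "dataDir": "/hadoop/zookeeper",
    "autopurge.snapRetainCount": 30,
    "autopurge.purgeInterval": 24,
}
ensemble = ["zk1.example.com", "zk2.example.com", "zk3.example.com"]

lines = ["{0}={1}".format(k, v) for k, v in zoo_cfg.items()]
lines += ["server.{0}={1}:2888:3888".format(i + 1, host)
          for i, host in enumerate(ensemble)]
print("\n".join(lines))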

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-env.xml
new file mode 100755
index 0000000..b57992e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-env.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>zk_user</name>
+    <value>zookeeper</value>
+    <property-type>USER</property-type>
+    <description>ZooKeeper User.</description>
+  </property>
+  <property>
+    <name>zk_log_dir</name>
+    <display-name>ZooKeeper Log Dir</display-name>
+    <value>/var/log/zookeeper</value>
+    <description>ZooKeeper Log Dir</description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>zk_pid_dir</name>
+    <display-name>ZooKeeper PID Dir</display-name>
+    <value>/var/run/zookeeper</value>
+    <description>ZooKeeper PID Dir</description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+
+  <!-- zookeeper-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the Jinja template for the zookeeper-env.sh file</description>
+    <value>
+export JAVA_HOME={{java64_home}}
+export ZOOKEEPER_HOME={{zk_home}}
+export ZOO_LOG_DIR={{zk_log_dir}}
+export ZOOPIDFILE={{zk_pid_file}}
+export SERVER_JVMFLAGS={{zk_server_heapsize}}
+export JAVA=$JAVA_HOME/bin/java
+export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
+
+{% if security_enabled %}
+export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
+export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
+{% endif %}
+    </value>
+  </property>
+</configuration>
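
The content property above is itself a Jinja template: Ambari fills in the Java home, ZooKeeper home, log and PID locations and heap flags, then writes the result out as zookeeper-env.sh. A rendering sketch with security disabled; every substituted value below is an assumption except the log directory, which matches the zk_log_dir default:

# Sketch: render the zookeeper-env.sh template body with assumed values.
from jinja2 import Template

env_template = """\
export JAVA_HOME={{java64_home}}
export ZOOKEEPER_HOME={{zk_home}}
export ZOO_LOG_DIR={{zk_log_dir}}
export ZOOPIDFILE={{zk_pid_file}}
export SERVER_JVMFLAGS={{zk_server_heapsize}}
export JAVA=$JAVA_HOME/bin/java
export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*

{% if security_enabled %}
export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
{% endif %}
"""

print(Template(env_template).render(
    java64_home="/usr/jdk64/jdk1.8.0",                       # assumed
    zk_home="/usr/iop/current/zookeeper-client",             # assumed
    zk_log_dir="/var/log/zookeeper",
    zk_pid_file="/var/run/zookeeper/zookeeper_server.pid",   # assumed
    zk_server_heapsize="-Xmx1024m",                          # assumed
    security_enabled=False))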

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
new file mode 100755
index 0000000..6fcf5bc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
@@ -0,0 +1,101 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+#
+# ZooKeeper Logging Configuration
+#
+
+# DEFAULT: console appender only
+log4j.rootLogger=INFO, CONSOLE
+
+# Example with rolling log file
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
+
+# Example with rolling log file and tracing
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
+
+#
+# Log INFO level and above messages to the console
+#
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.Threshold=INFO
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+#
+# Add ROLLINGFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.ROLLINGFILE.Threshold=DEBUG
+log4j.appender.ROLLINGFILE.File=zookeeper.log
+
+# Max log file size of 10MB
+log4j.appender.ROLLINGFILE.MaxFileSize=10MB
+# uncomment the next line to limit number of backup files
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
+
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+
+#
+# Add TRACEFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+log4j.appender.TRACEFILE.Threshold=TRACE
+log4j.appender.TRACEFILE.File=zookeeper_trace.log
+
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+### Notice we are including log4j's NDC here (%x)
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/kerberos.json
new file mode 100755
index 0000000..0a64ea5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/kerberos.json
@@ -0,0 +1,39 @@
+{
+  "services": [
+    {
+      "name": "ZOOKEEPER",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "ZOOKEEPER_SERVER",
+          "identities": [
+            {
+              "name": "zookeeper_zk",
+              "principal": {
+                "value": "zookeeper/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "zookeeper-env/zookeeper_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/zk.service.keytab",
+                "owner": {
+                  "name": "${zookeeper-env/zk_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "zookeeper-env/zookeeper_keytab_path"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
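
The ${...} references in the keytab block above are resolved against other configuration types (zookeeper-env/zk_user, cluster-env/user_group) and against Kerberos environment values such as keytab_dir and realm, while _HOST in the principal is replaced per host. A small substitution sketch with assumed values:

# Sketch: expand the ${...} placeholders used in the kerberos descriptor.
# The realm, keytab_dir and user_group values are assumptions; _HOST is left
# untouched here, since it is substituted per host at kerberization time.
import re

context = {
    "realm": "EXAMPLE.COM",                  # assumed
    "keytab_dir": "/etc/security/keytabs",   # assumed
    "zookeeper-env/zk_user": "zookeeper",
    "cluster-env/user_group": "hadoop",      # assumed
}

def expand(text):
    return re.sub(r"\$\{([^}]+)\}",
                  lambda m: context.get(m.group(1), m.group(0)), text)

print(expand("zookeeper/_HOST@${realm}"))         # principal value
print(expand("${keytab_dir}/zk.service.keytab"))  # keytab file
print(expand("${zookeeper-env/zk_user}"))         # keytab owner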

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/metainfo.xml
new file mode 100755
index 0000000..2bd2532
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,91 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <displayName>ZooKeeper</displayName>
+      <comment>Centralized service which provides highly reliable distributed coordination</comment>
+      <version>3.4.6</version>
+      <components>
+
+        <component>
+          <name>ZOOKEEPER_SERVER</name>
+          <displayName>ZooKeeper Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/zookeeper_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>ZOOKEEPER_CLIENT</name>
+          <displayName>ZooKeeper Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/zookeeper_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>zookeeper-env.sh</fileName>
+              <dictionaryName>zookeeper-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>zookeeper-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>zookeeper-log4j</config-type>
+        <config-type>zookeeper-env</config-type>
+        <config-type>zoo.cfg</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterChange>true</restartRequiredAfterChange>
+    </service>
+  </services>
+</metainfo>
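
metainfo.xml is what the Ambari server parses to learn the service's components, command scripts, packages and config dependencies. One way to inspect those declarations offline with only the standard library, assuming the repository-relative path this commit adds:

# Sketch: list ZOOKEEPER components and their command scripts from metainfo.xml.
# The path assumes the file layout added by this commit, relative to an
# ambari source checkout.
import xml.etree.ElementTree as ET

path = ("ambari-server/src/main/resources/stacks/BigInsights/4.0/"
        "services/ZOOKEEPER/metainfo.xml")
root = ET.parse(path).getroot()

for component in root.iter("component"):
    print("{0} ({1}): {2}".format(
        component.findtext("name"),
        component.findtext("category"),
        component.findtext("commandScript/script")))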

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkEnv.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkEnv.sh
new file mode 100755
index 0000000..fa1b832
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkEnv.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script should be sourced into other zookeeper
+# scripts to set up the env variables
+
+# We use ZOOCFGDIR if defined,
+# otherwise we use /etc/zookeeper
+# or the conf directory that is
+# a sibling of this script's directory
+if [ "x$ZOOCFGDIR" = "x" ]
+then
+    if [ -d "/etc/zookeeper" ]
+    then
+        ZOOCFGDIR="/etc/zookeeper"
+    else
+        ZOOCFGDIR="$ZOOBINDIR/../conf"
+    fi
+fi
+
+if [ "x$ZOOCFG" = "x" ]
+then
+    ZOOCFG="zoo.cfg"
+fi
+
+ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
+
+if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
+then
+    . "$ZOOCFGDIR/zookeeper-env.sh"
+fi
+
+if [ "x${ZOO_LOG_DIR}" = "x" ]
+then
+    ZOO_LOG_DIR="."
+fi
+
+if [ "x${ZOO_LOG4J_PROP}" = "x" ]
+then
+    ZOO_LOG4J_PROP="INFO,CONSOLE"
+fi
+
+#add the zoocfg dir to classpath
+CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
+
+for i in "$ZOOBINDIR"/../src/java/lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../zookeeper-*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work for developers
+for d in "$ZOOBINDIR"/../build/lib/*.jar
+do
+   CLASSPATH="$d:$CLASSPATH"
+done
+
+#make it work for developers
+CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
+
+case "`uname`" in
+    CYGWIN*) cygwin=true ;;
+    *) cygwin=false ;;
+esac
+
+if $cygwin
+then
+    CLASSPATH=`cygpath -wp "$CLASSPATH"`
+fi
+
+#echo "CLASSPATH=$CLASSPATH"