Posted to commits@ambari.apache.org by al...@apache.org on 2016/12/19 17:48:26 UTC

[04/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
deleted file mode 100644
index cdadc80..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
+++ /dev/null
@@ -1,479 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.stack_features import get_stack_feature_version
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.default import default
-from resource_management.libraries import functions
-from resource_management.libraries.functions import is_empty
-from resource_management.libraries.functions.get_architecture import get_architecture
-
-import status_params
-
-# a map of the Ambari role to the component name
-# for use with <stack-root>/current/<component>
-MAPR_SERVER_ROLE_DIRECTORY_MAP = {
-  'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
-  'MAPREDUCE2_CLIENT' : 'hadoop-mapreduce-client',
-}
-
-YARN_SERVER_ROLE_DIRECTORY_MAP = {
-  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
-  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
-  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
-  'YARN_CLIENT' : 'hadoop-yarn-client'
-}
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-architecture = get_architecture()
-
-stack_name = status_params.stack_name
-stack_root = Script.get_stack_root()
-tarball_map = default("/configurations/cluster-env/tarball_map", None)
-
-config_path = os.path.join(stack_root, "current/hadoop-client/conf")
-config_dir = os.path.realpath(config_path)
-
-# This is expected to be of the form #.#.#.#
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted_major = format_stack_version(stack_version_unformatted)
-stack_version_formatted = functions.get_stack_version('hadoop-yarn-resourcemanager')
-
-stack_supports_ru = stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted_major)
-stack_supports_timeline_state_store = stack_version_formatted_major and check_stack_feature(StackFeature.TIMELINE_STATE_STORE, stack_version_formatted_major)
-
-# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
-# It cannot be used during the initial Cluster Install because the version is not yet known.
-version = default("/commandParams/version", None)
-
-# get the correct version to use for checking stack features
-version_for_stack_feature_checks = get_stack_feature_version(config)
-
-stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
-stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
-
-hostname = config['hostname']
-
-# hadoop default parameters
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_bin = stack_select.get_hadoop_dir("sbin")
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_yarn_home = '/usr/lib/hadoop-yarn'
-hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
-mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
-yarn_bin = "/usr/lib/hadoop-yarn/sbin"
-yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
-hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
-
-# hadoop parameters for stacks supporting rolling_upgrade
-if stack_supports_ru:
-  # MapR directory root
-  mapred_role_root = "hadoop-mapreduce-client"
-  command_role = default("/role", "")
-  if command_role in MAPR_SERVER_ROLE_DIRECTORY_MAP:
-    mapred_role_root = MAPR_SERVER_ROLE_DIRECTORY_MAP[command_role]
-
-  # YARN directory root
-  yarn_role_root = "hadoop-yarn-client"
-  if command_role in YARN_SERVER_ROLE_DIRECTORY_MAP:
-    yarn_role_root = YARN_SERVER_ROLE_DIRECTORY_MAP[command_role]
-
-  hadoop_mapred2_jar_location = format("{stack_root}/current/{mapred_role_root}")
-  mapred_bin = format("{stack_root}/current/{mapred_role_root}/sbin")
-
-  hadoop_yarn_home = format("{stack_root}/current/{yarn_role_root}")
-  yarn_bin = format("{stack_root}/current/{yarn_role_root}/sbin")
-  yarn_container_bin = format("{stack_root}/current/{yarn_role_root}/bin")
-
-if stack_supports_timeline_state_store:
-  # Timeline Service property that was added with the timeline_state_store stack feature
-  ats_leveldb_state_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-state-store.path']
-
-# ats 1.5 properties
-entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']
-entity_groupfs_active_dir_mode = 01777
-entity_groupfs_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.done-dir']
-entity_groupfs_store_dir_mode = 0700
-
-hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
-
-limits_conf_dir = "/etc/security/limits.d"
-yarn_user_nofile_limit = default("/configurations/yarn-env/yarn_user_nofile_limit", "32768")
-yarn_user_nproc_limit = default("/configurations/yarn-env/yarn_user_nproc_limit", "65536")
-
-mapred_user_nofile_limit = default("/configurations/mapred-env/mapred_user_nofile_limit", "32768")
-mapred_user_nproc_limit = default("/configurations/mapred-env/mapred_user_nproc_limit", "65536")
-
-execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir + os.pathsep + yarn_container_bin
-
-ulimit_cmd = "ulimit -c unlimited;"
-
-mapred_user = status_params.mapred_user
-yarn_user = status_params.yarn_user
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
-smoke_hdfs_user_mode = 0770
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-nm_security_marker_dir = "/var/lib/hadoop-yarn"
-nm_security_marker = format('{nm_security_marker_dir}/nm_security_enabled')
-current_nm_security_state = os.path.isfile(nm_security_marker)
-toggle_nm_security = (current_nm_security_state and not security_enabled) or (not current_nm_security_state and security_enabled)
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-
-yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
-yarn_nodemanager_container_executor_class =  config['configurations']['yarn-site']['yarn.nodemanager.container-executor.class']
-is_linux_container_executor = (yarn_nodemanager_container_executor_class == 'org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor')
-container_executor_mode = 06050 if is_linux_container_executor else 02050
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-yarn_http_policy = config['configurations']['yarn-site']['yarn.http.policy']
-yarn_https_on = (yarn_http_policy.upper() == 'HTTPS_ONLY')
-rm_hosts = config['clusterHostInfo']['rm_host']
-rm_host = rm_hosts[0]
-rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
-rm_https_port = default('/configurations/yarn-site/yarn.resourcemanager.webapp.https.address', ":8090").split(':')[-1]
-# TODO UPGRADE default, update site during upgrade
-rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-rm_nodes_exclude_dir = os.path.dirname(rm_nodes_exclude_path)
-
-java64_home = config['hostLevelParams']['java_home']
-hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
-
-yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
-resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize']
-nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize']
-apptimelineserver_heapsize = default("/configurations/yarn-env/apptimelineserver_heapsize", 1024)
-ats_leveldb_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-timeline-store.path']
-ats_leveldb_lock_file = os.path.join(ats_leveldb_dir, "leveldb-timeline-store.ldb", "LOCK")
-yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
-yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
-mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
-mapred_log_dir_prefix = config['configurations']['mapred-env']['mapred_log_dir_prefix']
-mapred_env_sh_template = config['configurations']['mapred-env']['content']
-yarn_env_sh_template = config['configurations']['yarn-env']['content']
-yarn_nodemanager_recovery_dir = default('/configurations/yarn-site/yarn.nodemanager.recovery.dir', None)
-service_check_queue_name = default('/configurations/yarn-env/service_check.queue.name', 'default')
-
-if len(rm_hosts) > 1:
-  additional_rm_host = rm_hosts[1]
-  rm_webui_address = format("{rm_host}:{rm_port},{additional_rm_host}:{rm_port}")
-  rm_webui_https_address = format("{rm_host}:{rm_https_port},{additional_rm_host}:{rm_https_port}")
-else:
-  rm_webui_address = format("{rm_host}:{rm_port}")
-  rm_webui_https_address = format("{rm_host}:{rm_https_port}")
-
-if security_enabled:
-  tc_mode = 0644
-  tc_owner = "root"
-else:
-  tc_mode = None
-  tc_owner = hdfs_user
-
-nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
-hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
-nm_address = config['configurations']['yarn-site']['yarn.nodemanager.address']  # still contains 0.0.0.0
-if hostname and nm_address and nm_address.startswith("0.0.0.0:"):
-  nm_address = nm_address.replace("0.0.0.0", hostname)
-
-# Initialize lists of work directories.
-nm_local_dirs = default("/configurations/yarn-site/yarn.nodemanager.local-dirs", "")
-nm_log_dirs = default("/configurations/yarn-site/yarn.nodemanager.log-dirs", "")
-
-nm_local_dirs_list = nm_local_dirs.split(',')
-nm_log_dirs_list = nm_log_dirs.split(',')
-
-nm_log_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_log_dir_mount.hist"
-nm_local_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_local_dir_mount.hist"
-
-distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
-hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
-
-entity_file_history_directory = "/tmp/entity-file-history/active"
-
-yarn_pid_dir = status_params.yarn_pid_dir
-mapred_pid_dir = status_params.mapred_pid_dir
-
-mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
-yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
-mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
-yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-#exclude file
-exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
-exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-
-ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
-has_ats = not len(ats_host) == 0
-
-nm_hosts = default("/clusterHostInfo/nm_hosts", [])
-
-# don't use len(nm_hosts) here, because the check can take too much time on large clusters
-number_of_nm = 1
-
-# default kinit commands
-rm_kinit_cmd = ""
-yarn_timelineservice_kinit_cmd = ""
-nodemanager_kinit_cmd = ""
-
-if security_enabled:
-  rm_principal_name = config['configurations']['yarn-site']['yarn.resourcemanager.principal']
-  rm_principal_name = rm_principal_name.replace('_HOST',hostname.lower())
-  rm_keytab = config['configurations']['yarn-site']['yarn.resourcemanager.keytab']
-  rm_kinit_cmd = format("{kinit_path_local} -kt {rm_keytab} {rm_principal_name};")
-
-  # YARN timeline security options
-  if has_ats:
-    _yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal']
-    _yarn_timelineservice_principal_name = _yarn_timelineservice_principal_name.replace('_HOST', hostname.lower())
-    _yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab']
-    yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {_yarn_timelineservice_keytab} {_yarn_timelineservice_principal_name};")
-
-  if 'yarn.nodemanager.principal' in config['configurations']['yarn-site']:
-    _nodemanager_principal_name = default('/configurations/yarn-site/yarn.nodemanager.principal', None)
-    if _nodemanager_principal_name:
-      _nodemanager_principal_name = _nodemanager_principal_name.replace('_HOST', hostname.lower())
-
-    _nodemanager_keytab = config['configurations']['yarn-site']['yarn.nodemanager.keytab']
-    nodemanager_kinit_cmd = format("{kinit_path_local} -kt {_nodemanager_keytab} {_nodemanager_principal_name};")
-
-
-yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']
-yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']
-mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']
-mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']
-jobhistory_heapsize = default("/configurations/mapred-env/jobhistory_heapsize", "900")
-jhs_leveldb_state_store_dir = default('/configurations/mapred-site/mapreduce.jobhistory.recovery.store.leveldb.path', "/hadoop/mapreduce/jhs")
-
-# Tez-related properties
-tez_user = config['configurations']['tez-env']['tez_user']
-
-# Tez jars
-tez_local_api_jars = '/usr/lib/tez/tez*.jar'
-tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
-app_dir_files = {tez_local_api_jars:None}
-
-# Tez libraries
-tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
-
-#for create_hdfs_directory
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-
-
-
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled']
-
-# Path to file that contains list of HDFS resources to be skipped during processing
-hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore"
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file = hdfs_resource_ignore_file,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
- )
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
-
-mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
-
-#taskcontroller.cfg
-
-mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-min_user_id = config['configurations']['yarn-env']['min_user_id']
-
-# Node labels
-node_labels_dir = default("/configurations/yarn-site/yarn.node-labels.fs-store.root-dir", None)
-node_label_enable = config['configurations']['yarn-site']['yarn.node-labels.enabled']
-
-cgroups_dir = "/cgroups_test/cpu"
-
-# ***********************  RANGER PLUGIN CHANGES ***********************
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-# hostname of the active HDFS HA Namenode (only used when HA is enabled)
-dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
-if dfs_ha_namenode_active is not None: 
-  namenode_hostname = dfs_ha_namenode_active
-else:
-  namenode_hostname = config['clusterHostInfo']['namenode_host'][0]
-
-ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
-
-scheme = 'http' if not yarn_https_on else 'https'
-yarn_rm_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'] if not yarn_https_on else config['configurations']['yarn-site']['yarn.resourcemanager.webapp.https.address']
-rm_active_port = rm_https_port if yarn_https_on else rm_port
-
-rm_ha_enabled = False
-rm_ha_ids_list = []
-rm_webapp_addresses_list = [yarn_rm_address]
-rm_ha_ids = default("/configurations/yarn-site/yarn.resourcemanager.ha.rm-ids", None)
-
-if rm_ha_ids:
-  rm_ha_ids_list = rm_ha_ids.split(",")
-  if len(rm_ha_ids_list) > 1:
-    rm_ha_enabled = True
-
-if rm_ha_enabled:
-  rm_webapp_addresses_list = []
-  for rm_id in rm_ha_ids_list:
-    rm_webapp_address_property = format('yarn.resourcemanager.webapp.address.{rm_id}') if not yarn_https_on else format('yarn.resourcemanager.webapp.https.address.{rm_id}')
-    rm_webapp_address = config['configurations']['yarn-site'][rm_webapp_address_property]
-    rm_webapp_addresses_list.append(rm_webapp_address)
-
-#ranger yarn properties
-if has_ranger_admin:
-  is_supported_yarn_ranger = config['configurations']['yarn-env']['is_supported_yarn_ranger']
-
-  if is_supported_yarn_ranger:
-    enable_ranger_yarn = (config['configurations']['ranger-yarn-plugin-properties']['ranger-yarn-plugin-enabled'].lower() == 'yes')
-    policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-    if policymgr_mgr_url.endswith('/'):
-      policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-    xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
-    xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
-    xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
-    xa_audit_db_password = ''
-    if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-      xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-    xa_db_host = config['configurations']['admin-properties']['db_host']
-    repo_name = str(config['clusterName']) + '_yarn'
-
-    ranger_env = config['configurations']['ranger-env']
-    ranger_plugin_properties = config['configurations']['ranger-yarn-plugin-properties']
-    policy_user = config['configurations']['ranger-yarn-plugin-properties']['policy_user']
-    yarn_rest_url = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']  
-
-    ranger_plugin_config = {
-      'username' : config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
-      'password' : unicode(config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
-      'yarn.url' : format('{scheme}://{yarn_rest_url}'),
-      'commonNameForCertificate' : config['configurations']['ranger-yarn-plugin-properties']['common.name.for.certificate']
-    }
-
-    yarn_ranger_plugin_repo = {
-      'isEnabled': 'true',
-      'configs': ranger_plugin_config,
-      'description': 'yarn repo',
-      'name': repo_name,
-      'repositoryType': 'yarn',
-      'type': 'yarn',
-      'assetType': '1'
-    }
-
-    if stack_supports_ranger_kerberos:
-      ranger_plugin_config['ambari.service.check.user'] = policy_user
-      ranger_plugin_config['hadoop.security.authentication'] = 'kerberos' if security_enabled else 'simple'
-
-    if stack_supports_ranger_kerberos and security_enabled:
-      ranger_plugin_config['policy.download.auth.users'] = yarn_user
-      ranger_plugin_config['tag.download.auth.users'] = yarn_user
-
-    #For curl command in ranger plugin to get db connector
-    jdk_location = config['hostLevelParams']['jdk_location']
-    java_share_dir = '/usr/share/java'
-    previous_jdbc_jar_name = None
-    if stack_supports_ranger_audit_db:
-      if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
-        jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-        jdbc_driver = "com.mysql.jdbc.Driver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
-        jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-        colon_count = xa_db_host.count(':')
-        if colon_count == 2 or colon_count == 0:
-          audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-        else:
-          audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-        jdbc_driver = "oracle.jdbc.OracleDriver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
-        jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-        jdbc_driver = "org.postgresql.Driver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
-        jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-        jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
-        jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-        jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-
-    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-    driver_curl_target = format("{hadoop_yarn_home}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-    previous_jdbc_jar = format("{hadoop_yarn_home}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-
-    xa_audit_db_is_enabled = False
-    ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
-    if xml_configurations_supported and stack_supports_ranger_audit_db:
-      xa_audit_db_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.db']
-    xa_audit_hdfs_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
-    ssl_keystore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-    ssl_truststore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-    credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
-
-    #For SQLA explicitly disable audit to DB for Ranger
-    if xa_audit_db_flavor == 'sqla':
-      xa_audit_db_is_enabled = False
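
A note on the HdfsResource pattern near the end of params_linux.py above: functools.partial pre-binds the
cluster-wide arguments (user, keytab, conf dirs, default fs) into HdfsResource once at import time, so every
later call site supplies only the per-path arguments. A minimal sketch of that pattern, using an illustrative
stand-in class rather than the real resource_management resource:

    import functools

    class HdfsResource(object):
        # illustrative stand-in; the real HdfsResource takes many more arguments
        def __init__(self, path, type=None, action=None, user=None, security_enabled=False, **common):
            print("HdfsResource(%s) type=%s action=%s as user=%s" % (path, type, action, user))

    # bind the arguments shared by every call once, at module import time
    HdfsResource = functools.partial(HdfsResource, user="hdfs", security_enabled=False)

    # call sites then read like the service scripts, passing only per-path options
    HdfsResource("/user/ambari-qa", type="directory", action="create_on_execute")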

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
deleted file mode 100644
index 52918d2e..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries import functions
-from resource_management.libraries.functions import is_empty
-import os
-from status_params import *
-
-# server configurations
-config = Script.get_config()
-
-hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
-yarn_user = hadoop_user
-hdfs_user = hadoop_user
-smokeuser = hadoop_user
-config_dir = os.environ["HADOOP_CONF_DIR"]
-hadoop_home = os.environ["HADOOP_HOME"]
-
-yarn_home = os.environ["HADOOP_YARN_HOME"]
-
-hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-rm_host = config['clusterHostInfo']['rm_host'][0]
-rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
-rm_https_port = "8090"
-rm_webui_address = format("{rm_host}:{rm_port}")
-rm_webui_https_address = format("{rm_host}:{rm_https_port}")
-
-hs_host = config['clusterHostInfo']['hs_host'][0]
-hs_port = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'].split(':')[-1]
-hs_webui_address = format("{hs_host}:{hs_port}")
-
-hadoop_mapred2_jar_location = os.path.join(os.environ["HADOOP_COMMON_HOME"], "share", "hadoop", "mapreduce")
-hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
-
-exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
-exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
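
Both params modules resolve optional settings through default(), which walks a '/'-separated path into the
command's nested configuration dict and returns a fallback when any key along the path is missing. A rough
reimplementation of that behavior, assuming a plain nested dict (the real helper is
resource_management.libraries.functions.default and reads the script's global config):

    def default(path, fallback, config):
        # walk e.g. "/configurations/yarn-env/yarn_heapsize" key by key
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    config = {"configurations": {"yarn-env": {"yarn_heapsize": "1024"}}}
    print(default("/configurations/yarn-env/yarn_heapsize", "512", config))  # -> 1024
    print(default("/configurations/yarn-env/not_set", "512", config))        # -> 512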

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
deleted file mode 100644
index e053fe6..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
+++ /dev/null
@@ -1,293 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from resource_management.libraries.functions.decorator import retry
-from resource_management.core.resources.system import File, Execute
-from resource_management.core.source import Template
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
-from resource_management.libraries.providers.hdfs_resource import HdfsResourceProvider
-from resource_management import is_empty
-from resource_management import shell
-
-
-from yarn import yarn
-from service import service
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from setup_ranger_yarn import setup_ranger_yarn
-
-
-class Resourcemanager(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    service('resourcemanager', action='stop')
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name='resourcemanager')
-
-  def refreshqueues(self, env):
-    pass
-
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class ResourcemanagerWindows(Resourcemanager):
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    service('resourcemanager', action='start')
-
-  def status(self, env):
-    service('resourcemanager', action='status')
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    yarn_user = params.yarn_user
-
-    yarn_refresh_cmd = format("cmd /c yarn rmadmin -refreshNodes")
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=yarn_user,
-         mode="f"
-    )
-
-    if params.update_exclude_file_only == False:
-      Execute(yarn_refresh_cmd, user=yarn_user)
-
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class ResourcemanagerDefault(Resourcemanager):
-  def get_component_name(self):
-    return "hadoop-yarn-resourcemanager"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade post-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-resourcemanager", params.version)
-
-  def start(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    if params.has_ranger_admin and params.is_supported_yarn_ranger:
-      setup_ranger_yarn() #Ranger Yarn Plugin related calls
-
-    # wait for active-dir and done-dir to be created by ATS if needed
-    if params.has_ats:
-      Logger.info("Verifying DFS directories where ATS stores time line data for active and completed applications.")
-      self.wait_for_dfs_directories_created(params.entity_groupfs_store_dir, params.entity_groupfs_active_dir)
-
-    service('resourcemanager', action='start')
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.resourcemanager_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.resourcemanager.principal",
-                           "yarn.resourcemanager.keytab",
-                           "yarn.resourcemanager.webapp.spnego-principal",
-                           "yarn.resourcemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.resourcemanager.keytab",
-                          "yarn.resourcemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.resourcemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.keytab'],
-                                security_params['yarn-site']['yarn.resourcemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def refreshqueues(self, env):
-    import params
-
-    self.configure(env)
-    env.set_params(params)
-
-    service('resourcemanager',
-            action='refreshQueues'
-    )
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    rm_kinit_cmd = params.rm_kinit_cmd
-    yarn_user = params.yarn_user
-    conf_dir = params.hadoop_conf_dir
-    user_group = params.user_group
-
-    yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=yarn_user,
-         group=user_group
-    )
-
-    if params.update_exclude_file_only == False:
-      Execute(yarn_refresh_cmd,
-              environment={'PATH': params.execute_path},
-              user=yarn_user)
-
-
-
-
-  def wait_for_dfs_directories_created(self, *dirs):
-    import params
-
-    ignored_dfs_dirs = HdfsResourceProvider.get_ignored_resources_list(params.hdfs_resource_ignore_file)
-
-    if params.security_enabled:
-      Execute(params.rm_kinit_cmd,
-              user=params.yarn_user
-      )
-      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-        user=params.hdfs_user
-      )
-
-    for dir_path in dirs:
-      self.wait_for_dfs_directory_created(dir_path, ignored_dfs_dirs)
-
-
-  @retry(times=8, sleep_time=20, backoff_factor=1, err_class=Fail)
-  def wait_for_dfs_directory_created(self, dir_path, ignored_dfs_dirs):
-    import params
-
-
-    if not is_empty(dir_path):
-      dir_path = HdfsResourceProvider.parse_path(dir_path)
-
-      if dir_path in ignored_dfs_dirs:
-        Logger.info("Skipping DFS directory '" + dir_path + "' as it's marked to be ignored.")
-        return
-
-      Logger.info("Verifying if DFS directory '" + dir_path + "' exists.")
-
-      dir_exists = None
-
-      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
-        # checking with webhdfs is much faster than executing hdfs dfs -test
-        util = WebHDFSUtil(params.hdfs_site, params.hdfs_user, params.security_enabled)
-        list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
-        dir_exists = ('FileStatus' in list_status)
-      else:
-        # have to do the time-expensive hdfs dfs -test -d check.
-        dfs_ret_code = shell.call(format("hdfs --config {hadoop_conf_dir} dfs -test -d " + dir_path), user=params.yarn_user)[0]
-        dir_exists = not dfs_ret_code #dfs -test -d returns 0 in case the dir exists
-
-      if not dir_exists:
-        raise Fail("DFS directory '" + dir_path + "' does not exist !")
-      else:
-        Logger.info("DFS directory '" + dir_path + "' exists.")
-
-  def get_log_folder(self):
-    import params
-    return params.yarn_log_dir
-  
-  def get_user(self):
-    import params
-    return params.yarn_user
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.resourcemanager_pid_file]
-  
-if __name__ == "__main__":
-  Resourcemanager().execute()
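
resourcemanager.py above leans on resource_management's @retry decorator to poll for the ATS active/done
directories (8 attempts, 20 seconds apart, raising Fail only after the last try). A simplified stand-in
showing those semantics; the real decorator lives in resource_management.libraries.functions.decorator and
supports more options:

    import time
    import functools

    def retry(times=3, sleep_time=1, backoff_factor=1, err_class=Exception):
        # re-invoke the wrapped function until it succeeds or attempts run out
        def decorator(fn):
            @functools.wraps(fn)
            def wrapper(*args, **kwargs):
                delay = sleep_time
                for attempt in range(times):
                    try:
                        return fn(*args, **kwargs)
                    except err_class:
                        if attempt == times - 1:
                            raise            # last attempt: let the error escape
                        time.sleep(delay)
                        delay *= backoff_factor
            return wrapper
        return decorator

    @retry(times=3, sleep_time=0.1, err_class=RuntimeError)
    def probe():
        raise RuntimeError("directory not created yet")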

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service.py
deleted file mode 100644
index 78b2428..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service.py
+++ /dev/null
@@ -1,106 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-from resource_management.core.shell import as_user, as_sudo
-from resource_management.libraries.functions.show_logs import show_logs
-from resource_management.libraries.functions.format import format
-from resource_management.core.resources.system import Execute, File
-from resource_management.core.resources.service import Service
-from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def service(componentName, action='start', serviceName='yarn'):
-  import status_params
-  if status_params.service_map.has_key(componentName):
-    service_name = status_params.service_map[componentName]
-    if action == 'start' or action == 'stop':
-      Service(service_name, action=action)
-    elif action == 'status':
-      check_windows_service_status(service_name)
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def service(componentName, action='start', serviceName='yarn'):
-  import params
-
-  if serviceName == 'mapreduce' and componentName == 'historyserver':
-    delete_pid_file = True
-    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
-    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
-    usr = params.mapred_user
-    log_dir = params.mapred_log_dir
-  else:
-    # !!! yarn-daemon.sh deletes the PID for us; if we remove it the script
-    # may not work correctly when stopping the service
-    delete_pid_file = False
-    daemon = format("{yarn_bin}/yarn-daemon.sh")
-    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{componentName}.pid")
-    usr = params.yarn_user
-    log_dir = params.yarn_log_dir
-
-  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}")
-
-  if action == 'start':
-    daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}")
-    check_process = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file])
-
-    # Remove the pid file if its corresponding process is not running.
-    File(pid_file, action = "delete", not_if = check_process)
-
-    if componentName == 'timelineserver' and serviceName == 'yarn':
-      File(params.ats_leveldb_lock_file,
-         action = "delete",
-         only_if = format("ls {params.ats_leveldb_lock_file}"),
-         not_if = check_process,
-         ignore_failures = True
-      )
-
-    try:
-      # Attempt to start the process. Internally, this is skipped if the process is already running.
-      Execute(daemon_cmd, user = usr, not_if = check_process)
-  
-      # Ensure that the process with the expected PID exists.
-      Execute(check_process,
-              not_if = check_process,
-              tries=5,
-              try_sleep=1,
-      )
-    except:
-      show_logs(log_dir, usr)
-      raise
-
-  elif action == 'stop':
-    daemon_cmd = format("{cmd} stop {componentName}")
-    try:
-      Execute(daemon_cmd, user=usr)
-    except:
-      show_logs(log_dir, usr)
-      raise
-
-    # !!! yarn-daemon doesn't need us to delete PIDs
-    if delete_pid_file is True:
-      File(pid_file, action="delete")
-
-
-  elif action == 'refreshQueues':
-    rm_kinit_cmd = params.rm_kinit_cmd
-    refresh_cmd = format("{rm_kinit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {yarn_container_bin}/yarn rmadmin -refreshQueues")
-    Execute(refresh_cmd, user=usr)
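
service.py above decides whether a daemon is alive with check_process = test -f <pid_file> && pgrep -F
<pid_file>, and uses it both to skip redundant starts (not_if) and to delete stale pid files first. The same
liveness probe expressed directly in Python (the pid file path in the usage line is hypothetical):

    import os

    def pid_file_alive(pid_file):
        # roughly "test -f pid_file && pgrep -F pid_file": the file must exist
        # and the pid recorded in it must belong to a running process
        if not os.path.isfile(pid_file):
            return False
        try:
            pid = int(open(pid_file).read().strip())
            os.kill(pid, 0)   # signal 0 only probes for existence
            return True
        except (ValueError, OSError):
            return False

    if not pid_file_alive("/var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid"):
        print("stale or missing pid file; safe to delete before starting")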

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service_check.py
deleted file mode 100644
index b934767..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service_check.py
+++ /dev/null
@@ -1,185 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import os
-import sys
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
-import re
-import subprocess
-from ambari_commons import os_utils
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.get_user_call_output import get_user_call_output
-from resource_management.core.exceptions import Fail
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute, File
-from resource_management.core.source import StaticFile
-from resource_management.core import shell
-
-CURL_CONNECTION_TIMEOUT = '5'
-
-class ServiceCheck(Script):
-  def service_check(self, env):
-    pass
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class ServiceCheckWindows(ServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    yarn_exe = os_utils.quote_path(os.path.join(params.yarn_home, "bin", "yarn.cmd"))
-
-    run_yarn_check_cmd = "cmd /C %s node -list" % yarn_exe
-
-    component_type = 'rm'
-    if params.hadoop_ssl_enabled:
-      component_address = params.rm_webui_https_address
-    else:
-      component_address = params.rm_webui_address
-
-    #temp_dir = os.path.abspath(os.path.join(params.hadoop_home, os.pardir)), "/tmp"
-    temp_dir = os.path.join(os.path.dirname(params.hadoop_home), "temp")
-    validateStatusFileName = "validateYarnComponentStatusWindows.py"
-    validateStatusFilePath = os.path.join(temp_dir, validateStatusFileName)
-    python_executable = sys.executable
-    validateStatusCmd = "%s %s %s -p %s -s %s" % (python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
-
-    if params.security_enabled:
-      kinit_cmd = "%s -kt %s %s;" % (params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
-      smoke_cmd = kinit_cmd + ' ' + validateStatusCmd
-    else:
-      smoke_cmd = validateStatusCmd
-
-    File(validateStatusFilePath,
-         content=StaticFile(validateStatusFileName)
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            logoutput=True
-    )
-
-    Execute(run_yarn_check_cmd, logoutput=True)
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class ServiceCheckDefault(ServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    params.HdfsResource(format("/user/{smokeuser}"),
-                        type="directory",
-                        action="create_on_execute",
-                        owner=params.smokeuser,
-                        mode=params.smoke_hdfs_user_mode,
-                        )
-
-    if params.stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted_major):
-      path_to_distributed_shell_jar = format("{stack_root}/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar")
-    else:
-      path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
-
-    yarn_distributed_shell_check_params = ["yarn org.apache.hadoop.yarn.applications.distributedshell.Client",
-                                           "-shell_command", "ls", "-num_containers", "{number_of_nm}",
-                                           "-jar", "{path_to_distributed_shell_jar}", "-timeout", "300000",
-                                           "--queue", "{service_check_queue_name}"]
-    yarn_distributed_shell_check_cmd = format(" ".join(yarn_distributed_shell_check_params))
-
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
-      smoke_cmd = format("{kinit_cmd} {yarn_distributed_shell_check_cmd}")
-    else:
-      smoke_cmd = yarn_distributed_shell_check_cmd
-
-    return_code, out = shell.checked_call(smoke_cmd,
-                                          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                          user=params.smokeuser,
-                                          )
-
-    m = re.search("appTrackingUrl=(.*),\s", out)
-    app_url = m.group(1)
-
-    splitted_app_url = str(app_url).split('/')
-
-    for item in splitted_app_url:
-      if "application" in item:
-        application_name = item
-
-    # Find out the active RM from RM list
-    # Raise an exception if the active rm cannot be determined
-    active_rm_webapp_address = self.get_active_rm_webapp_address()
-    Logger.info("Active Resource Manager web app address is : " + active_rm_webapp_address);
-
-    # Verify job state from active resource manager via rest api
-    info_app_url = params.scheme + "://" + active_rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
-    get_app_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
-
-    return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
-                                                  user=params.smokeuser,
-                                                  path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                                  )
-
-    try:
-      json_response = json.loads(stdout)
-    except Exception as e:
-      raise Fail(format("Response from YARN API was not a valid JSON. Response: {stdout}"))
-
-    if json_response is None or 'app' not in json_response or \
-            'state' not in json_response['app'] or 'finalStatus' not in json_response['app']:
-      raise Fail("Application " + app_url + " returns invalid data.")
-
-    if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
-      raise Fail("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
-
-  def get_active_rm_webapp_address(self):
-    import params
-    active_rm_webapp_address = None
-    rm_webapp_addresses = params.rm_webapp_addresses_list
-    if rm_webapp_addresses is not None and len(rm_webapp_addresses) > 0:
-      for rm_webapp_address in rm_webapp_addresses:
-        rm_state_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/info"
-        get_cluster_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + rm_state_url
-        try:
-          return_code, stdout, _ = get_user_call_output(get_cluster_info_cmd,
-                                                        user=params.smokeuser,
-                                                        path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                                        )
-          json_response = json.loads(stdout)
-          if json_response is not None and 'clusterInfo' in json_response \
-            and json_response['clusterInfo']['haState'] == "ACTIVE":
-              active_rm_webapp_address = rm_webapp_address
-              break
-        except Exception as e:
-          Logger.warning(format("Cluster info is not available from calling {get_cluster_info_cmd}"))
-
-    if active_rm_webapp_address is None:
-      raise Fail('Resource Manager state is not available. Failed to determine the active Resource Manager web application address from {0}'.format(','.join(rm_webapp_addresses)))
-    return active_rm_webapp_address
-
-if __name__ == "__main__":
-  ServiceCheck().execute()
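
get_active_rm_webapp_address above shells out to curl and scans each ResourceManager's /ws/v1/cluster/info
response for haState == ACTIVE. The same probe with only the standard library, for illustration (hostnames
are placeholders; a Kerberized cluster still needs the SPNEGO-capable curl --negotiate call used above):

    import json
    try:
        from urllib.request import urlopen   # Python 3
    except ImportError:
        from urllib2 import urlopen          # Python 2, which these scripts target

    def find_active_rm(webapp_addresses, scheme="http"):
        for address in webapp_addresses:
            url = "%s://%s/ws/v1/cluster/info" % (scheme, address)
            try:
                info = json.loads(urlopen(url, timeout=5).read())
            except Exception:
                continue                      # this RM is down; try the next one
            if info.get("clusterInfo", {}).get("haState") == "ACTIVE":
                return address
        raise RuntimeError("no ACTIVE ResourceManager among %s" % ", ".join(webapp_addresses))

    # e.g. find_active_rm(["rm1.example.com:8088", "rm2.example.com:8088"])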

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/setup_ranger_yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/setup_ranger_yarn.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/setup_ranger_yarn.py
deleted file mode 100644
index 6ea7f82..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/setup_ranger_yarn.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-    http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from resource_management.core.logger import Logger
-
-def setup_ranger_yarn():
-  import params
-
-  if params.has_ranger_admin:
-
-    from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
-
-    if params.retryAble:
-      Logger.info("YARN: Setup ranger: command retry enables thus retrying if ranger admin is down !")
-    else:
-      Logger.info("YARN: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
-
-    if params.xml_configurations_supported and params.enable_ranger_yarn and params.xa_audit_hdfs_is_enabled:
-      params.HdfsResource("/ranger/audit",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user,
-                         group=params.hdfs_user,
-                         mode=0755,
-                         recursive_chmod=True
-      )
-      params.HdfsResource("/ranger/audit/yarn",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.yarn_user,
-                         group=params.yarn_user,
-                         mode=0700,
-                         recursive_chmod=True
-      )
-      params.HdfsResource(None, action="execute")
-
-    setup_ranger_plugin('hadoop-yarn-resourcemanager', 'yarn', params.previous_jdbc_jar,
-                        params.downloaded_custom_connector, params.driver_curl_source,
-                        params.driver_curl_target, params.java64_home,
-                        params.repo_name, params.yarn_ranger_plugin_repo,
-                        params.ranger_env, params.ranger_plugin_properties,
-                        params.policy_user, params.policymgr_mgr_url,
-                        params.enable_ranger_yarn, conf_dict=params.hadoop_conf_dir,
-                        component_user=params.yarn_user, component_group=params.user_group, cache_service_list=['yarn'],
-                        plugin_audit_properties=params.config['configurations']['ranger-yarn-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-yarn-audit'],
-                        plugin_security_properties=params.config['configurations']['ranger-yarn-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-yarn-security'],
-                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-yarn-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-yarn-policymgr-ssl'],
-                        component_list=['hadoop-yarn-resourcemanager'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
-                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
-                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        api_version = 'v2', skip_if_rangeradmin_down= not params.retryAble,
-                        is_security_enabled = params.security_enabled,
-                        is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
-                        component_user_principal=params.rm_principal_name if params.security_enabled else None,
-                        component_user_keytab=params.rm_keytab if params.security_enabled else None
-      )
-  else:
-    Logger.info('Ranger admin not installed')
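
Note the deferred-execution idiom above: each HdfsResource(...) with action="create_on_execute" only queues work, and the trailing HdfsResource(None, action="execute") flushes the queue in one batch. A toy sketch of that pattern, illustrative only and not the Ambari implementation:

    class DeferredResource(object):
        _queue = []  # shared queue, flushed on the sentinel call

        def __init__(self, path, action, **kwargs):
            if path is None and action == "execute":
                for queued_path, queued_kwargs in self._queue:
                    # the real library would issue WebHDFS calls here
                    print("applying %s with %s" % (queued_path, queued_kwargs))
                del self._queue[:]
            else:
                self._queue.append((path, kwargs))

    DeferredResource("/ranger/audit", action="create_on_execute", mode=0755)
    DeferredResource("/ranger/audit/yarn", action="create_on_execute", mode=0700)
    DeferredResource(None, action="execute")  # flush both in one pass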

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/status_params.py
deleted file mode 100644
index c2e9d92..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/status_params.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.script.script import Script
-from resource_management.libraries import functions
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.default import default
-from ambari_commons import OSCheck
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-if OSCheck.is_windows_family():
-  resourcemanager_win_service_name = 'resourcemanager'
-  nodemanager_win_service_name = 'nodemanager'
-  historyserver_win_service_name = 'historyserver'
-  timelineserver_win_service_name = 'timelineserver'
-
-  service_map = {
-    'resourcemanager' : resourcemanager_win_service_name,
-    'nodemanager' : nodemanager_win_service_name,
-    'historyserver' : historyserver_win_service_name,
-    'timelineserver' : timelineserver_win_service_name
-  }
-else:
-  mapred_user = config['configurations']['mapred-env']['mapred_user']
-  yarn_user = config['configurations']['yarn-env']['yarn_user']
-  yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
-  mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
-  yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
-  mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
-
-  resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
-  nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
-  yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
-  yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
-  mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
-
-  hadoop_conf_dir = functions.conf_select.get_hadoop_conf_dir()
-
-  hostname = config['hostname']
-  kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-  security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
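
The pid file paths defined here are what the component status commands poll. A sketch of the usual liveness check built on them (the actual check lives in the service scripts; the example path is hypothetical):

    import os

    def is_process_live(pid_file):
        """Return True if pid_file names a currently running process."""
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 probes existence without killing
            return True
        except (IOError, ValueError, OSError):
            return False

    # e.g. is_process_live("/var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid")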

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
deleted file mode 100644
index 70ed5b3..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
+++ /dev/null
@@ -1,498 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-import os
-
-# Ambari Common and Resource Management Imports
-from resource_management.libraries.script.script import Script
-from resource_management.core.resources.service import ServiceConfig
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.core.resources.system import Directory
-from resource_management.core.resources.system import File
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.core.source import InlineTemplate, Template
-from resource_management.core.logger import Logger
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-
-from resource_management.libraries.functions.mounted_dirs_helper import handle_mounted_dirs
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def yarn(name=None, config_dir=None):
-  """
-  :param name: Component name: resourcemanager, nodemanager, historyserver, apptimelineserver, or None (client defaults)
-  :param config_dir: The config directory to write configs to; this can differ during a rolling upgrade.
-  """
-  import params
-
-  if name == 'resourcemanager':
-    setup_resourcemanager()
-  elif name == 'nodemanager':
-    setup_nodemanager()
-  elif name == 'apptimelineserver':
-    setup_ats()
-  elif name == 'historyserver':
-    setup_historyserver()
-
-  if config_dir is None:
-    config_dir = params.hadoop_conf_dir
-
-  if params.yarn_nodemanager_recovery_dir:
-    Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
-              owner=params.yarn_user,
-              group=params.user_group,
-              create_parents = True,
-              mode=0755,
-              cd_access = 'a',
-    )
-
-  Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
-            owner=params.yarn_user,
-            group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
-  )
-
-  Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
-            owner=params.mapred_user,
-            group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
-  )
-  Directory([params.yarn_log_dir_prefix],
-            owner=params.yarn_user,
-            group=params.user_group,
-            create_parents = True,
-            ignore_failures=True,
-            cd_access = 'a',
-  )
-
-  XmlConfig("core-site.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['core-site'],
-            configuration_attributes=params.config['configuration_attributes']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  # During a rolling upgrade (RU), core masters and slaves need hdfs-site.xml.
-  # TODO: instead of specifying individual configs, which is susceptible to breaking when new ones are added,
-  # RU should rely on everything available in <stack-root>/<version>/hadoop/conf
-  XmlConfig("hdfs-site.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("mapred-site.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("yarn-site.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['yarn-site'],
-            configuration_attributes=params.config['configuration_attributes']['yarn-site'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("capacity-scheduler.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['capacity-scheduler'],
-            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  File(format("{limits_conf_dir}/yarn.conf"),
-       mode=0644,
-       content=Template('yarn.conf.j2')
-  )
-
-  File(format("{limits_conf_dir}/mapreduce.conf"),
-       mode=0644,
-       content=Template('mapreduce.conf.j2')
-  )
-
-  File(os.path.join(config_dir, "yarn-env.sh"),
-       owner=params.yarn_user,
-       group=params.user_group,
-       mode=0755,
-       content=InlineTemplate(params.yarn_env_sh_template)
-  )
-
-  File(format("{yarn_container_bin}/container-executor"),
-      group=params.yarn_executor_container_group,
-      mode=params.container_executor_mode
-  )
-
-  File(os.path.join(config_dir, "container-executor.cfg"),
-      group=params.user_group,
-      mode=0644,
-      content=Template('container-executor.cfg.j2')
-  )
-
-  Directory(params.cgroups_dir,
-            group=params.user_group,
-            create_parents = True,
-            mode=0755,
-            cd_access="a")
-
-  File(os.path.join(config_dir, "mapred-env.sh"),
-       owner=params.tc_owner,
-       mode=0755,
-       content=InlineTemplate(params.mapred_env_sh_template)
-  )
-
-  if params.security_enabled:
-    File(os.path.join(params.hadoop_bin, "task-controller"),
-         owner="root",
-         group=params.mapred_tt_group,
-         mode=06050
-    )
-    File(os.path.join(config_dir, 'taskcontroller.cfg'),
-         owner = params.tc_owner,
-         mode = params.tc_mode,
-         group = params.mapred_tt_group,
-         content=Template("taskcontroller.cfg.j2")
-    )
-  else:
-    File(os.path.join(config_dir, 'taskcontroller.cfg'),
-         owner=params.tc_owner,
-         content=Template("taskcontroller.cfg.j2")
-    )
-
-  XmlConfig("mapred-site.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-            owner=params.mapred_user,
-            group=params.user_group
-  )
-
-  XmlConfig("capacity-scheduler.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['capacity-scheduler'],
-            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  if "ssl-client" in params.config['configurations']:
-    XmlConfig("ssl-client.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations']['ssl-client'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-    Directory(params.hadoop_conf_secure_dir,
-              create_parents = True,
-              owner='root',
-              group=params.user_group,
-              cd_access='a',
-              )
-
-    XmlConfig("ssl-client.xml",
-              conf_dir=params.hadoop_conf_secure_dir,
-              configurations=params.config['configurations']['ssl-client'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  if "ssl-server" in params.config['configurations']:
-    XmlConfig("ssl-server.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations']['ssl-server'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-server'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-  if os.path.exists(os.path.join(config_dir, 'fair-scheduler.xml')):
-    File(os.path.join(config_dir, 'fair-scheduler.xml'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  if os.path.exists(os.path.join(config_dir, 'ssl-client.xml.example')):
-    File(os.path.join(config_dir, 'ssl-client.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  if os.path.exists(os.path.join(config_dir, 'ssl-server.xml.example')):
-    File(os.path.join(config_dir, 'ssl-server.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-def setup_historyserver():
-  import params
-
-  if params.yarn_log_aggregation_enabled:
-    params.HdfsResource(params.yarn_nm_app_log_dir,
-                         action="create_on_execute",
-                         type="directory",
-                         owner=params.yarn_user,
-                         group=params.user_group,
-                         mode=01777,
-                         recursive_chmod=True
-    )
-
-  # create the /tmp folder with proper permissions if it doesn't exist yet
-  if params.entity_file_history_directory.startswith('/tmp'):
-      params.HdfsResource(params.hdfs_tmp_dir,
-                          action="create_on_execute",
-                          type="directory",
-                          owner=params.hdfs_user,
-                          mode=0777,
-      )
-
-  params.HdfsResource(params.entity_file_history_directory,
-                         action="create_on_execute",
-                         type="directory",
-                         owner=params.yarn_user,
-                         group=params.user_group
-  )
-  params.HdfsResource("/mapred",
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.mapred_user
-  )
-  params.HdfsResource("/mapred/system",
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.hdfs_user
-  )
-  params.HdfsResource(params.mapreduce_jobhistory_done_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.mapred_user,
-                       group=params.user_group,
-                       change_permissions_for_parents=True,
-                       mode=0777
-  )
-  params.HdfsResource(None, action="execute")
-  Directory(params.jhs_leveldb_state_store_dir,
-            owner=params.mapred_user,
-            group=params.user_group,
-            create_parents = True,
-            cd_access="a",
-            recursive_ownership = True,
-            )
-
-def setup_nodemanager():
-  import params
-
-  # First start after enabling/disabling security
-  if params.toggle_nm_security:
-    Directory(params.nm_local_dirs_list + params.nm_log_dirs_list,
-              action='delete'
-    )
-
-    # If yarn.nodemanager.recovery.dir is set, remove that dir
-    if params.yarn_nodemanager_recovery_dir:
-      Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
-                action='delete'
-      )
-
-    # Setting NM marker file
-    if params.security_enabled:
-      Directory(params.nm_security_marker_dir)
-      File(params.nm_security_marker,
-           content="Marker file to track first start after enabling/disabling security. "
-                   "During first start yarn local, log dirs are removed and recreated"
-           )
-    elif not params.security_enabled:
-      File(params.nm_security_marker, action="delete")
-
-
-  if not params.security_enabled or params.toggle_nm_security:
-    # handle_mounted_dirs ensures that we don't create dirs which are temporarily unavailable (unmounted) and intended to reside on a different mount.
-    nm_log_dir_to_mount_file_content = handle_mounted_dirs(create_log_dir, params.nm_log_dirs, params.nm_log_dir_to_mount_file, params)
-    # create a history file used by handle_mounted_dirs
-    File(params.nm_log_dir_to_mount_file,
-         owner=params.hdfs_user,
-         group=params.user_group,
-         mode=0644,
-         content=nm_log_dir_to_mount_file_content
-    )
-    nm_local_dir_to_mount_file_content = handle_mounted_dirs(create_local_dir, params.nm_local_dirs, params.nm_local_dir_to_mount_file, params)
-    File(params.nm_local_dir_to_mount_file,
-         owner=params.hdfs_user,
-         group=params.user_group,
-         mode=0644,
-         content=nm_local_dir_to_mount_file_content
-    )
-
-def setup_resourcemanager():
-  import params
-
-  Directory(params.rm_nodes_exclude_dir,
-       mode=0755,
-       create_parents=True,
-       cd_access='a',
-  )
-  File(params.rm_nodes_exclude_path,
-       owner=params.yarn_user,
-       group=params.user_group
-  )
-  File(params.yarn_job_summary_log,
-     owner=params.yarn_user,
-     group=params.user_group
-  )
-  if (not is_empty(params.node_label_enable) and params.node_label_enable) or (is_empty(params.node_label_enable) and params.node_labels_dir):
-    params.HdfsResource(params.node_labels_dir,
-                         type="directory",
-                         action="create_on_execute",
-                         change_permissions_for_parents=True,
-                         owner=params.yarn_user,
-                         group=params.user_group,
-                         mode=0700
-    )
-    params.HdfsResource(None, action="execute")
-
-def setup_ats():
-  import params
-
-  Directory(params.ats_leveldb_dir,
-     owner=params.yarn_user,
-     group=params.user_group,
-     create_parents = True,
-     cd_access="a",
-  )
-
-  # if the stack supports the Application Timeline Service state store (timeline_state_store stack feature)
-  if params.stack_supports_timeline_state_store:
-    Directory(params.ats_leveldb_state_store_dir,
-     owner=params.yarn_user,
-     group=params.user_group,
-     create_parents = True,
-     cd_access="a",
-    )
-  # app timeline server 1.5 directories
-  if not is_empty(params.entity_groupfs_store_dir):
-    parent_path = os.path.dirname(params.entity_groupfs_store_dir)
-    params.HdfsResource(parent_path,
-                        type="directory",
-                        action="create_on_execute",
-                        change_permissions_for_parents=True,
-                        owner=params.yarn_user,
-                        group=params.user_group,
-                        mode=0755
-                        )
-    params.HdfsResource(params.entity_groupfs_store_dir,
-                        type="directory",
-                        action="create_on_execute",
-                        owner=params.yarn_user,
-                        group=params.user_group,
-                        mode=params.entity_groupfs_store_dir_mode
-                        )
-  if not is_empty(params.entity_groupfs_active_dir):
-    parent_path = os.path.dirname(params.entity_groupfs_active_dir)
-    params.HdfsResource(parent_path,
-                        type="directory",
-                        action="create_on_execute",
-                        change_permissions_for_parents=True,
-                        owner=params.yarn_user,
-                        group=params.user_group,
-                        mode=0755
-                        )
-    params.HdfsResource(params.entity_groupfs_active_dir,
-                        type="directory",
-                        action="create_on_execute",
-                        owner=params.yarn_user,
-                        group=params.user_group,
-                        mode=params.entity_groupfs_active_dir_mode
-                        )
-  params.HdfsResource(None, action="execute")
-
-def create_log_dir(dir_name):
-  import params
-  Directory(dir_name,
-            create_parents = True,
-            cd_access="a",
-            mode=0775,
-            owner=params.yarn_user,
-            group=params.user_group,
-            ignore_failures=True,
-  )
-
-def create_local_dir(dir_name):
-  import params
-  Directory(dir_name,
-            create_parents = True,
-            cd_access="a",
-            mode=0755,
-            owner=params.yarn_user,
-            group=params.user_group,
-            ignore_failures=True,
-            recursive_mode_flags = {'f': 'a+rw', 'd': 'a+rwx'},
-  )
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def yarn(name = None):
-  import params
-  XmlConfig("mapred-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            owner=params.yarn_user,
-            mode='f'
-  )
-  XmlConfig("yarn-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['yarn-site'],
-            owner=params.yarn_user,
-            mode='f',
-            configuration_attributes=params.config['configuration_attributes']['yarn-site']
-  )
-  XmlConfig("capacity-scheduler.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['capacity-scheduler'],
-            owner=params.yarn_user,
-            mode='f'
-  )
-
-  if name in params.service_map:
-    service_name = params.service_map[name]
-
-    ServiceConfig(service_name,
-                  action="change_user",
-                  username = params.yarn_user,
-                  password = Script.get_password(params.yarn_user))
\ No newline at end of file
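
The handle_mounted_dirs calls in setup_nodemanager() are the interesting part: each NodeManager dir is paired with the mount point it was last created on, so a dir whose disk is temporarily unmounted is skipped instead of silently recreated on the root filesystem. A simplified sketch of that idea (not the Ambari helper's actual signature):

    import os

    def find_mount_point(path):
        """Walk up from path until an actual mount point is found."""
        path = os.path.abspath(path)
        while not os.path.ismount(path):
            path = os.path.dirname(path)
        return path

    def safe_create_dirs(dirs, last_mounts, create_fn):
        """Create each dir unless its recorded mount has gone missing."""
        for d in dirs:
            recorded = last_mounts.get(d)
            if recorded and not os.path.ismount(recorded):
                continue  # mount is down: do not recreate on the wrong device
            create_fn(d)
            last_mounts[d] = find_mount_point(d)  # remember for next run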

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn_client.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn_client.py
deleted file mode 100644
index beea8b9..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn_client.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.core.exceptions import ClientComponentHasNoStatus
-from yarn import yarn
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-
-class YarnClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class YarnClientWindows(YarnClient):
-  pass
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class YarnClientDefault(YarnClient):
-  def get_component_name(self):
-    return "hadoop-client"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-client", params.version)
-
-
-if __name__ == "__main__":
-  YarnClient().execute()
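
pre_upgrade_restart() above is the standard version-gated relink for clients: it is a no-op unless the target version supports rolling upgrade. A condensed sketch of that guard, with the feature check stubbed out (the stub is illustrative; the real test is check_stack_feature(StackFeature.ROLLING_UPGRADE, version)):

    def supports_rolling_upgrade(version):
        # stubbed feature check, for illustration only
        return version is not None

    def pre_upgrade_restart_sketch(stack_name, version):
        if version and supports_rolling_upgrade(version):
            # real code: conf_select.select(stack_name, "hadoop", version)
            #            stack_select.select("hadoop-client", version)
            print("relinking hadoop conf and binaries to %s" % version)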

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/container-executor.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/container-executor.cfg.j2 b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/container-executor.cfg.j2
deleted file mode 100644
index c6f1ff6..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/container-executor.cfg.j2
+++ /dev/null
@@ -1,40 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-yarn.nodemanager.local-dirs={{nm_local_dirs}}
-yarn.nodemanager.log-dirs={{nm_log_dirs}}
-yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
-banned.users=hdfs,yarn,mapred,bin
-min.user.id={{min_user_id}}
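
For reference, a rendered container-executor.cfg produced from this template might look like the following; all values are hypothetical examples, the real ones come from yarn-site and the Ambari params:

    yarn.nodemanager.local-dirs=/hadoop/yarn/local
    yarn.nodemanager.log-dirs=/hadoop/yarn/log
    yarn.nodemanager.linux-container-executor.group=hadoop
    banned.users=hdfs,yarn,mapred,bin
    min.user.id=1000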