Posted to commits@bigtop.apache.org by rv...@apache.org on 2017/03/22 06:10:26 UTC

[35/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapred_service_check.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapred_service_check.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapred_service_check.py
deleted file mode 100755
index 5fc498d..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapred_service_check.py
+++ /dev/null
@@ -1,168 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import os
-import sys
-from resource_management import *
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.core.logger import Logger
-
-
-class MapReduce2ServiceCheck(Script):
-  def service_check(self, env):
-    pass
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class MapReduce2ServiceCheckWindows(MapReduce2ServiceCheck):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-
-    component_type = 'hs'
-    # the history server web UI address is the same whether or not SSL
-    # is enabled, so there is no need to branch on hadoop_ssl_enabled
-    component_address = params.hs_webui_address
-
-    validateStatusFileName = "validateYarnComponentStatusWindows.py"
-    validateStatusFilePath = os.path.join(os.path.dirname(params.hadoop_home), "temp", validateStatusFileName)
-    python_executable = sys.executable
-    validateStatusCmd = "{0} {1} {2} -p {3} -s {4}".format(
-      python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
-
-    if params.security_enabled:
-      kinit_cmd = "{0} -kt {1} {2};".format(params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
-      smoke_cmd = kinit_cmd + validateStatusCmd
-    else:
-      smoke_cmd = validateStatusCmd
-
-    File(validateStatusFilePath,
-         content=StaticFile(validateStatusFileName)
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            logoutput=True
-    )
-
-    # hadoop_exe = os.path.join(params.hadoop_home, "bin", "hadoop")
-    #
-    # tested_file = os.path.join(params.hadoop_home, "bin", "hadoop.cmd")
-    # jar_path = os.path.join(params.hadoop_mapred2_jar_location, params.hadoopMapredExamplesJarName)
-    # input_file = format("/user/hadoop/mapredsmokeinput")
-    # output_file = format("/user/hadoop/mapredsmokeoutput")
-    # cleanup_cmd = format("cmd /C {hadoop_exe} fs -rm -r -f {output_file} {input_file}")
-    # create_file_cmd = format("cmd /C {hadoop_exe} fs -put {tested_file} {input_file}")
-    # run_wordcount_job = format("cmd /C {hadoop_exe} jar {jar_path} wordcount {input_file} {output_file}")
-    # test_cmd = format("cmd /C {hadoop_exe} fs -test -e {output_file}")
-    #
-    # if params.security_enabled:
-    #   kinit_cmd = "{0} -kt {1} {2};".format(kinit_path_local, smoke_user_keytab, smokeuser)
-    #   Execute(kinit_cmd)
-    #
-    # Execute(cleanup_cmd,
-    #         tries=1,
-    #         try_sleep=5,
-    #         logoutput=True,
-    #         user=params.hdfs_user
-    # )
-    #
-    # Execute(create_file_cmd,
-    #         tries=1,
-    #         try_sleep=5,
-    #         logoutput=True,
-    #         user=params.hdfs_user
-    # )
-    #
-    # Execute(run_wordcount_job,
-    #         tries=1,
-    #         try_sleep=5,
-    #         logoutput=True,
-    #         user=params.hdfs_user
-    # )
-    #
-    # Execute(test_cmd,
-    #         logoutput=True,
-    #         user=params.hdfs_user
-    # )
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class MapReduce2ServiceCheckDefault(MapReduce2ServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
-    input_file = format("/user/{smokeuser}/mapredsmokeinput")
-    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
-
-    test_cmd = format("fs -test -e {output_file}")
-    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
-
-    params.HdfsResource(format("/user/{smokeuser}"),
-                      type="directory",
-                      action="create_on_execute",
-                      owner=params.smokeuser,
-                      mode=params.smoke_hdfs_user_mode,
-    )
-    params.HdfsResource(output_file,
-                        action = "delete_on_execute",
-                        type = "directory",
-                        dfs_type = params.dfs_type,
-    )
-    params.HdfsResource(input_file,
-                        action = "create_on_execute",
-                        type = "file",
-                        source = "/etc/passwd",
-                        dfs_type = params.dfs_type,
-    )
-    params.HdfsResource(None, action="execute")
-
-    # initialize the ticket
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
-      Execute(kinit_cmd, user=params.smokeuser)
-
-    ExecuteHadoop(run_wordcount_job,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  bin_dir=params.execute_path,
-                  conf_dir=params.hadoop_conf_dir,
-                  logoutput=True)
-
-    # the ticket may have expired, so re-initialize
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
-      Execute(kinit_cmd, user=params.smokeuser)
-
-    ExecuteHadoop(test_cmd,
-                  user=params.smokeuser,
-                  bin_dir=params.execute_path,
-                  conf_dir=params.hadoop_conf_dir)
-
-
-if __name__ == "__main__":
-  MapReduce2ServiceCheck().execute()

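The file above is built around Ambari's OS-family dispatch: @OsFamilyImpl registers a Windows and a default implementation of the same service check, and instantiating the base class at the bottom of the file yields whichever subclass matches the agent's OS. A minimal sketch of that dispatch idea follows; all names in it are illustrative, not the actual ambari_commons machinery.

    import platform

    _IMPLS = {}

    def os_family_impl(os_family):
        # register an implementation class under an OS-family key
        def register(cls):
            _IMPLS[os_family] = cls
            return cls
        return register

    def resolve():
        # crude OS detection standing in for ambari_commons.OSCheck
        family = "winsrv" if platform.system() == "Windows" else "default"
        return _IMPLS.get(family, _IMPLS["default"])

    @os_family_impl("winsrv")
    class CheckWindows(object):
        def service_check(self):
            print("running the Windows service check")

    @os_family_impl("default")
    class CheckDefault(object):
        def service_check(self):
            print("running the default (Linux) service check")

    resolve()().service_check()
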
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapreduce2_client.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapreduce2_client.py
deleted file mode 100755
index 424157b..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapreduce2_client.py
+++ /dev/null
@@ -1,98 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-# Python imports
-import os
-import sys
-
-# Local imports
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.core.exceptions import ClientComponentHasNoStatus
-from yarn import yarn
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.core.logger import Logger
-
-
-class MapReduce2Client(Script):
-  def install(self, env):
-    import params
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env, config_dir=None, upgrade_type=None):
-    """
-    :param env: Python environment
-    :param config_dir: During rolling upgrade, which config directory to save configs to.
-    """
-    import params
-    env.set_params(params)
-    yarn(config_dir=config_dir)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-  def stack_upgrade_save_new_config(self, env):
-    """
-    Because this gets called during a Rolling Upgrade, the new mapreduce configs have already been saved, so we must be
-    careful to only call configure() on the directory of the new version.
-    :param env:
-    """
-    import params
-    env.set_params(params)
-
-    conf_select_name = "hadoop"
-    base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-    config_dir = self.get_config_dir_during_stack_upgrade(env, base_dir, conf_select_name)
-
-    if config_dir:
-      Logger.info("stack_upgrade_save_new_config(): Calling conf-select on %s using version %s" % (conf_select_name, str(params.version)))
-
-      # Because this script was called from ru_execute_tasks.py, which already enters an Environment with its own basedir,
-      # we must change it now so this function can find the Jinja templates for the service.
-      env.config.basedir = base_dir
-      conf_select.select(params.stack_name, conf_select_name, params.version)
-      self.configure(env, config_dir=config_dir)
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class MapReduce2ClientWindows(MapReduce2Client):
-  pass
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class MapReduce2ClientDefault(MapReduce2Client):
-  def get_component_name(self):
-    return "hadoop-client"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-client", params.version)
-
-
-if __name__ == "__main__":
-  MapReduce2Client().execute()

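stack_upgrade_save_new_config() above relies on conf_select.select() to point the active Hadoop configuration at the directory for the new stack version. A hypothetical sketch of the underlying symlink swap, with an illustrative path layout (the real conf_select in resource_management also seeds, validates, and version-checks the directories):

    import os

    def select_conf(stack_root, version, link="/etc/hadoop/conf"):
        # repoint the stable conf symlink at the versioned config directory
        target = os.path.join(stack_root, version, "hadoop", "conf")
        tmp = link + ".swap"
        if os.path.lexists(tmp):
            os.remove(tmp)
        os.symlink(target, tmp)  # build the new link beside the old one
        os.rename(tmp, link)     # atomic replace on POSIX filesystems
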
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager.py
deleted file mode 100755
index b235cad..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager.py
+++ /dev/null
@@ -1,161 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import nodemanager_upgrade
-
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from yarn import yarn
-from service import service
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-
-class Nodemanager(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    service('nodemanager',action='stop')
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('nodemanager',action='start')
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name="nodemanager")
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class NodemanagerWindows(Nodemanager):
-  def status(self, env):
-    service('nodemanager', action='status')
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class NodemanagerDefault(Nodemanager):
-  def get_component_name(self):
-    return "hadoop-yarn-nodemanager"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing NodeManager Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-nodemanager", params.version)
-
-  def post_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing NodeManager Stack Upgrade post-restart")
-    import params
-    env.set_params(params)
-
-    nodemanager_upgrade.post_upgrade_check()
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nodemanager_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.nodemanager.principal",
-                           "yarn.nodemanager.keytab",
-                           "yarn.nodemanager.webapp.spnego-principal",
-                           "yarn.nodemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.nodemanager.keytab",
-                          "yarn.nodemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations = {}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.nodemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.nodemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.nodemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.nodemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.keytab'],
-                                security_params['yarn-site']['yarn.nodemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def get_log_folder(self):
-    import params
-    return params.yarn_log_dir
-  
-  def get_user(self):
-    import params
-    return params.yarn_user
-
-if __name__ == "__main__":
-  Nodemanager().execute()

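security_status() in the NodeManager script builds a table of expected yarn-site values plus properties that must be non-empty, then validates the on-disk XML against it before attempting kinit. A minimal sketch of that expectation check, with simplified helper shapes assumed (the real build_expectations and validate_security_config_properties live in resource_management's security_commons):

    def validate_props(actual, value_checks, empty_checks):
        # return a dict of property -> problem; empty means validation passed
        issues = {}
        for prop, expected in value_checks.items():
            if actual.get(prop) != expected:
                issues[prop] = "expected %r, found %r" % (expected, actual.get(prop))
        for prop in empty_checks:
            if not actual.get(prop):
                issues[prop] = "must be set and non-empty"
        return issues

    yarn_site = {
        "yarn.acl.enable": "true",
        "yarn.nodemanager.principal": "nm/_HOST@EXAMPLE.COM",
    }
    print(validate_props(
        yarn_site,
        {"yarn.acl.enable": "true",
         "yarn.timeline-service.http-authentication.type": "kerberos"},
        ["yarn.nodemanager.keytab"]))
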
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager_upgrade.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager_upgrade.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager_upgrade.py
deleted file mode 100755
index 1c886f9..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager_upgrade.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import subprocess
-
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.core.resources.system import Execute
-from resource_management.core import shell
-from resource_management.libraries.functions.decorator import retry
-from resource_management.libraries.functions.show_logs import show_logs
-
-
-def post_upgrade_check():
-  '''
-  Checks that the NodeManager has rejoined the cluster.
-  This function will obtain the Kerberos ticket if security is enabled.
-  :return:
-  '''
-  import params
-
-  Logger.info('NodeManager executing "yarn node -list -states=RUNNING" to verify the node has rejoined the cluster...')
-  if params.security_enabled and params.nodemanager_kinit_cmd:
-    Execute(params.nodemanager_kinit_cmd, user=params.yarn_user)
-
-  try:
-    _check_nodemanager_startup()
-  except Fail:
-    show_logs(params.yarn_log_dir, params.yarn_user)
-    raise
-    
-
-@retry(times=30, sleep_time=10, err_class=Fail)
-def _check_nodemanager_startup():
-  '''
-  Checks that a NodeManager is in a RUNNING state in the cluster via
-  "yarn node -list -states=RUNNING" command. Once the NodeManager is found to be
-  alive this method will return, otherwise it will raise a Fail(...) and retry
-  automatically.
-  :return:
-  '''
-  import params
-  import socket
-
-  command = 'yarn node -list -states=RUNNING'
-  return_code, yarn_output = shell.checked_call(command, user=params.yarn_user)
-  
-  hostname = params.hostname.lower()
-  hostname_ip = socket.gethostbyname(params.hostname.lower())
-  nodemanager_address = params.nm_address.lower()
-  yarn_output = yarn_output.lower()
-
-  if hostname in yarn_output or nodemanager_address in yarn_output or hostname_ip in yarn_output:
-    Logger.info('NodeManager with ID \'{0}\' has rejoined the cluster.'.format(nodemanager_address))
-    return
-  else:
-    raise Fail('NodeManager with ID \'{0}\' was not found in the list of running NodeManagers. \'{1}\' output was:\n{2}'.format(nodemanager_address, command, yarn_output))

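_check_nodemanager_startup() is wrapped in @retry(times=30, sleep_time=10, err_class=Fail), so a Fail raised inside it is retried for up to 30 attempts, 10 seconds apart, before propagating. A minimal sketch of a decorator with that behaviour, assuming fixed (non-backoff) sleeps; the real one lives in resource_management.libraries.functions.decorator and may differ in detail:

    import time
    import functools

    def retry(times, sleep_time, err_class):
        def wrap(fn):
            @functools.wraps(fn)
            def inner(*args, **kwargs):
                for attempt in range(times):
                    try:
                        return fn(*args, **kwargs)
                    except err_class:
                        if attempt == times - 1:
                            raise  # attempts exhausted: propagate the failure
                        time.sleep(sleep_time)
            return inner
        return wrap
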
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params.py
deleted file mode 100755
index 073e84f..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from ambari_commons import OSCheck
-from resource_management.libraries.functions.default import default
-
-if OSCheck.is_windows_family():
-  from params_windows import *
-else:
-  from params_linux import *
-
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-retryAble = default("/commandParams/command_retry_enabled", False)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_linux.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_linux.py
deleted file mode 100755
index 4d42861..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_linux.py
+++ /dev/null
@@ -1,469 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.stack_features import get_stack_feature_version
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.default import default
-from resource_management.libraries import functions
-from resource_management.libraries.functions import is_empty
-
-import status_params
-
-# a map of the Ambari role to the component name
-# for use with <stack-root>/current/<component>
-MAPR_SERVER_ROLE_DIRECTORY_MAP = {
-  'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
-  'MAPREDUCE2_CLIENT' : 'hadoop-mapreduce-client',
-}
-
-YARN_SERVER_ROLE_DIRECTORY_MAP = {
-  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
-  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
-  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
-  'YARN_CLIENT' : 'hadoop-yarn-client'
-}
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_name = status_params.stack_name
-stack_root = Script.get_stack_root()
-tarball_map = default("/configurations/cluster-env/tarball_map", None)
-
-config_path = os.path.join(stack_root, "current/hadoop-client/conf")
-config_dir = os.path.realpath(config_path)
-
-# This is expected to be of the form #.#.#.#
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted_major = format_stack_version(stack_version_unformatted)
-stack_version_formatted = functions.get_stack_version('hadoop-yarn-resourcemanager')
-
-stack_supports_ru = stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted_major)
-stack_supports_timeline_state_store = stack_version_formatted_major and check_stack_feature(StackFeature.TIMELINE_STATE_STORE, stack_version_formatted_major)
-
-# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
-# It cannot be used during the initial Cluster Install because the version is not yet known.
-version = default("/commandParams/version", None)
-
-# get the correct version to use for checking stack features
-version_for_stack_feature_checks = get_stack_feature_version(config)
-
-stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
-stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
-
-hostname = config['hostname']
-
-# hadoop default parameters
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_bin = stack_select.get_hadoop_dir("sbin")
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_yarn_home = '/usr/lib/hadoop-yarn'
-hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
-mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
-yarn_bin = "/usr/lib/hadoop-yarn/sbin"
-yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
-hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
-
-# hadoop parameters for stacks supporting rolling upgrade
-if stack_supports_ru:
-  # MapR directory root
-  mapred_role_root = "hadoop-mapreduce-client"
-  command_role = default("/role", "")
-  if command_role in MAPR_SERVER_ROLE_DIRECTORY_MAP:
-    mapred_role_root = MAPR_SERVER_ROLE_DIRECTORY_MAP[command_role]
-
-  # YARN directory root
-  yarn_role_root = "hadoop-yarn-client"
-  if command_role in YARN_SERVER_ROLE_DIRECTORY_MAP:
-    yarn_role_root = YARN_SERVER_ROLE_DIRECTORY_MAP[command_role]
-
-  hadoop_mapred2_jar_location = format("{stack_root}/current/{mapred_role_root}")
-  mapred_bin = format("{stack_root}/current/{mapred_role_root}/sbin")
-
-  hadoop_yarn_home = format("{stack_root}/current/{yarn_role_root}")
-  yarn_bin = format("{stack_root}/current/{yarn_role_root}/sbin")
-  yarn_container_bin = format("{stack_root}/current/{yarn_role_root}/bin")
-
-if stack_supports_timeline_state_store:
-  # Timeline Service property that was added with the timeline_state_store stack feature
-  ats_leveldb_state_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-state-store.path']
-
-# ats 1.5 properties
-entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']
-entity_groupfs_active_dir_mode = 01777
-entity_groupfs_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.done-dir']
-entity_groupfs_store_dir_mode = 0700
-
-hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
-
-limits_conf_dir = "/etc/security/limits.d"
-yarn_user_nofile_limit = default("/configurations/yarn-env/yarn_user_nofile_limit", "32768")
-yarn_user_nproc_limit = default("/configurations/yarn-env/yarn_user_nproc_limit", "65536")
-
-mapred_user_nofile_limit = default("/configurations/mapred-env/mapred_user_nofile_limit", "32768")
-mapred_user_nproc_limit = default("/configurations/mapred-env/mapred_user_nproc_limit", "65536")
-
-execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir + os.pathsep + yarn_container_bin
-
-ulimit_cmd = "ulimit -c unlimited;"
-
-mapred_user = status_params.mapred_user
-yarn_user = status_params.yarn_user
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
-smoke_hdfs_user_mode = 0770
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-nm_security_marker_dir = "/var/lib/hadoop-yarn"
-nm_security_marker = format('{nm_security_marker_dir}/nm_security_enabled')
-current_nm_security_state = os.path.isfile(nm_security_marker)
-toggle_nm_security = (current_nm_security_state and not security_enabled) or (not current_nm_security_state and security_enabled)
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-
-yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
-yarn_nodemanager_container_executor_class =  config['configurations']['yarn-site']['yarn.nodemanager.container-executor.class']
-is_linux_container_executor = (yarn_nodemanager_container_executor_class == 'org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor')
-container_executor_mode = 06050 if is_linux_container_executor else 02050
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-yarn_http_policy = config['configurations']['yarn-site']['yarn.http.policy']
-yarn_https_on = (yarn_http_policy.upper() == 'HTTPS_ONLY')
-rm_hosts = config['clusterHostInfo']['rm_host']
-rm_host = rm_hosts[0]
-rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
-rm_https_port = default('/configurations/yarn-site/yarn.resourcemanager.webapp.https.address', ":8090").split(':')[-1]
-# TODO UPGRADE default, update site during upgrade
-rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-rm_nodes_exclude_dir = os.path.dirname(rm_nodes_exclude_path)
-
-java64_home = config['hostLevelParams']['java_home']
-hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
-
-yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
-resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize']
-nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize']
-apptimelineserver_heapsize = default("/configurations/yarn-env/apptimelineserver_heapsize", 1024)
-ats_leveldb_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-timeline-store.path']
-ats_leveldb_lock_file = os.path.join(ats_leveldb_dir, "leveldb-timeline-store.ldb", "LOCK")
-yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
-yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
-mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
-mapred_log_dir_prefix = config['configurations']['mapred-env']['mapred_log_dir_prefix']
-mapred_env_sh_template = config['configurations']['mapred-env']['content']
-yarn_env_sh_template = config['configurations']['yarn-env']['content']
-yarn_nodemanager_recovery_dir = default('/configurations/yarn-site/yarn.nodemanager.recovery.dir', None)
-service_check_queue_name = default('/configurations/yarn-env/service_check.queue.name', 'default')
-
-if len(rm_hosts) > 1:
-  additional_rm_host = rm_hosts[1]
-  rm_webui_address = format("{rm_host}:{rm_port},{additional_rm_host}:{rm_port}")
-  rm_webui_https_address = format("{rm_host}:{rm_https_port},{additional_rm_host}:{rm_https_port}")
-else:
-  rm_webui_address = format("{rm_host}:{rm_port}")
-  rm_webui_https_address = format("{rm_host}:{rm_https_port}")
-
-nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
-hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
-nm_address = config['configurations']['yarn-site']['yarn.nodemanager.address']  # still contains 0.0.0.0
-if hostname and nm_address and nm_address.startswith("0.0.0.0:"):
-  nm_address = nm_address.replace("0.0.0.0", hostname)
-
-# Initialize lists of work directories.
-nm_local_dirs = default("/configurations/yarn-site/yarn.nodemanager.local-dirs", "")
-nm_log_dirs = default("/configurations/yarn-site/yarn.nodemanager.log-dirs", "")
-
-nm_local_dirs_list = nm_local_dirs.split(',')
-nm_log_dirs_list = nm_log_dirs.split(',')
-
-nm_log_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_log_dir_mount.hist"
-nm_local_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_local_dir_mount.hist"
-
-distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
-hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
-
-entity_file_history_directory = "/tmp/entity-file-history/active"
-
-yarn_pid_dir = status_params.yarn_pid_dir
-mapred_pid_dir = status_params.mapred_pid_dir
-
-mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
-yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
-mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
-yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-#exclude file
-exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
-exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-
-ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
-has_ats = not len(ats_host) == 0
-
-nm_hosts = default("/clusterHostInfo/nm_hosts", [])
-
-# don't use len(nm_hosts) here, because the check can take too much time on large clusters
-number_of_nm = 1
-
-# default kinit commands
-rm_kinit_cmd = ""
-yarn_timelineservice_kinit_cmd = ""
-nodemanager_kinit_cmd = ""
-
-if security_enabled:
-  rm_principal_name = config['configurations']['yarn-site']['yarn.resourcemanager.principal']
-  rm_principal_name = rm_principal_name.replace('_HOST',hostname.lower())
-  rm_keytab = config['configurations']['yarn-site']['yarn.resourcemanager.keytab']
-  rm_kinit_cmd = format("{kinit_path_local} -kt {rm_keytab} {rm_principal_name};")
-
-  # YARN timeline security options
-  if has_ats:
-    _yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal']
-    _yarn_timelineservice_principal_name = _yarn_timelineservice_principal_name.replace('_HOST', hostname.lower())
-    _yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab']
-    yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {_yarn_timelineservice_keytab} {_yarn_timelineservice_principal_name};")
-
-  if 'yarn.nodemanager.principal' in config['configurations']['yarn-site']:
-    _nodemanager_principal_name = default('/configurations/yarn-site/yarn.nodemanager.principal', None)
-    if _nodemanager_principal_name:
-      _nodemanager_principal_name = _nodemanager_principal_name.replace('_HOST', hostname.lower())
-
-    _nodemanager_keytab = config['configurations']['yarn-site']['yarn.nodemanager.keytab']
-    nodemanager_kinit_cmd = format("{kinit_path_local} -kt {_nodemanager_keytab} {_nodemanager_principal_name};")
-
-
-yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']
-yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']
-mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']
-mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']
-jobhistory_heapsize = default("/configurations/mapred-env/jobhistory_heapsize", "900")
-jhs_leveldb_state_store_dir = default('/configurations/mapred-site/mapreduce.jobhistory.recovery.store.leveldb.path', "/hadoop/mapreduce/jhs")
-
-# Tez-related properties
-tez_user = config['configurations']['tez-env']['tez_user']
-
-# Tez jars
-tez_local_api_jars = '/usr/lib/tez/tez*.jar'
-tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
-app_dir_files = {tez_local_api_jars:None}
-
-# Tez libraries
-tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
-
-#for create_hdfs_directory
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-
-
-
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled']
-
-# Path to file that contains list of HDFS resources to be skipped during processing
-hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore"
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-
-import functools
-# create a partial function with the common arguments bound for every HdfsResource call;
-# to create/delete an HDFS directory/file or copy from local, code then calls params.HdfsResource
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file = hdfs_resource_ignore_file,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
- )
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
-
-mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
-
-#taskcontroller.cfg
-
-mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-min_user_id = config['configurations']['yarn-env']['min_user_id']
-
-# Node labels
-node_labels_dir = default("/configurations/yarn-site/yarn.node-labels.fs-store.root-dir", None)
-node_label_enable = config['configurations']['yarn-site']['yarn.node-labels.enabled']
-
-cgroups_dir = "/cgroups_test/cpu"
-
-# ***********************  RANGER PLUGIN CHANGES ***********************
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-# hostname of the active HDFS HA Namenode (only used when HA is enabled)
-dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
-if dfs_ha_namenode_active is not None: 
-  namenode_hostname = dfs_ha_namenode_active
-else:
-  namenode_hostname = config['clusterHostInfo']['namenode_host'][0]
-
-ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
-
-scheme = 'http' if not yarn_https_on else 'https'
-yarn_rm_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'] if not yarn_https_on else config['configurations']['yarn-site']['yarn.resourcemanager.webapp.https.address']
-rm_active_port = rm_https_port if yarn_https_on else rm_port
-
-rm_ha_enabled = False
-rm_ha_ids_list = []
-rm_webapp_addresses_list = [yarn_rm_address]
-rm_ha_ids = default("/configurations/yarn-site/yarn.resourcemanager.ha.rm-ids", None)
-
-if rm_ha_ids:
-  rm_ha_ids_list = rm_ha_ids.split(",")
-  if len(rm_ha_ids_list) > 1:
-    rm_ha_enabled = True
-
-if rm_ha_enabled:
-  rm_webapp_addresses_list = []
-  for rm_id in rm_ha_ids_list:
-    rm_webapp_address_property = format('yarn.resourcemanager.webapp.address.{rm_id}') if not yarn_https_on else format('yarn.resourcemanager.webapp.https.address.{rm_id}')
-    rm_webapp_address = config['configurations']['yarn-site'][rm_webapp_address_property]
-    rm_webapp_addresses_list.append(rm_webapp_address)
-
-#ranger yarn properties
-if has_ranger_admin:
-  is_supported_yarn_ranger = config['configurations']['yarn-env']['is_supported_yarn_ranger']
-
-  if is_supported_yarn_ranger:
-    enable_ranger_yarn = (config['configurations']['ranger-yarn-plugin-properties']['ranger-yarn-plugin-enabled'].lower() == 'yes')
-    policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-    if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
-      policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-    xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
-    xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
-    xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
-    xa_audit_db_password = ''
-    if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-      xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-    xa_db_host = config['configurations']['admin-properties']['db_host']
-    repo_name = str(config['clusterName']) + '_yarn'
-
-    ranger_env = config['configurations']['ranger-env']
-    ranger_plugin_properties = config['configurations']['ranger-yarn-plugin-properties']
-    policy_user = config['configurations']['ranger-yarn-plugin-properties']['policy_user']
-    yarn_rest_url = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']  
-
-    ranger_plugin_config = {
-      'username' : config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
-      'password' : unicode(config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
-      'yarn.url' : format('{scheme}://{yarn_rest_url}'),
-      'commonNameForCertificate' : config['configurations']['ranger-yarn-plugin-properties']['common.name.for.certificate']
-    }
-
-    yarn_ranger_plugin_repo = {
-      'isEnabled': 'true',
-      'configs': ranger_plugin_config,
-      'description': 'yarn repo',
-      'name': repo_name,
-      'repositoryType': 'yarn',
-      'type': 'yarn',
-      'assetType': '1'
-    }
-
-    if stack_supports_ranger_kerberos:
-      ranger_plugin_config['ambari.service.check.user'] = policy_user
-      ranger_plugin_config['hadoop.security.authentication'] = 'kerberos' if security_enabled else 'simple'
-
-    if stack_supports_ranger_kerberos and security_enabled:
-      ranger_plugin_config['policy.download.auth.users'] = yarn_user
-      ranger_plugin_config['tag.download.auth.users'] = yarn_user
-
-    #For curl command in ranger plugin to get db connector
-    jdk_location = config['hostLevelParams']['jdk_location']
-    java_share_dir = '/usr/share/java'
-    previous_jdbc_jar_name = None
-    if stack_supports_ranger_audit_db:
-      if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
-        jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-        jdbc_driver = "com.mysql.jdbc.Driver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
-        jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-        colon_count = xa_db_host.count(':')
-        if colon_count == 2 or colon_count == 0:
-          audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-        else:
-          audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-        jdbc_driver = "oracle.jdbc.OracleDriver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
-        jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-        jdbc_driver = "org.postgresql.Driver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
-        jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-        jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
-        jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-        jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-
-    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-    driver_curl_target = format("{hadoop_yarn_home}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-    previous_jdbc_jar = format("{hadoop_yarn_home}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-
-    xa_audit_db_is_enabled = False
-    ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
-    if xml_configurations_supported and stack_supports_ranger_audit_db:
-      xa_audit_db_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.db']
-    xa_audit_hdfs_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
-    ssl_keystore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-    ssl_truststore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-    credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
-
-    #For SQLA explicitly disable audit to DB for Ranger
-    if xa_audit_db_flavor == 'sqla':
-      xa_audit_db_is_enabled = False

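Nearly every value in params_linux.py is built with format("...{name}..."), which resolves the placeholders against the caller's variables instead of taking explicit arguments. An approximate sketch of that lookup under a deliberately simple model (the real helper in resource_management also consults the configuration tree and applies shell escaping; format_from_caller is an illustrative name):

    import inspect
    import string

    def format_from_caller(fmt):
        # resolve {name} placeholders from the calling frame's scope
        caller = inspect.currentframe().f_back
        names = dict(caller.f_globals)
        names.update(caller.f_locals)
        return string.Formatter().vformat(fmt, (), names)

    rm_host, rm_port = "rm1.example.com", "8088"
    print(format_from_caller("{rm_host}:{rm_port}"))  # rm1.example.com:8088
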
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_windows.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_windows.py
deleted file mode 100755
index 0f8ce73..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_windows.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from resource_management.libraries import functions
-import os
-from status_params import *
-
-# server configurations
-config = Script.get_config()
-
-hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
-yarn_user = hadoop_user
-hdfs_user = hadoop_user
-smokeuser = hadoop_user
-config_dir = os.environ["HADOOP_CONF_DIR"]
-hadoop_home = os.environ["HADOOP_HOME"]
-
-yarn_home = os.environ["HADOOP_YARN_HOME"]
-
-hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-rm_host = config['clusterHostInfo']['rm_host'][0]
-rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
-rm_https_port = "8090"
-rm_webui_address = format("{rm_host}:{rm_port}")
-rm_webui_https_address = format("{rm_host}:{rm_https_port}")
-
-hs_host = config['clusterHostInfo']['hs_host'][0]
-hs_port = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'].split(':')[-1]
-hs_webui_address = format("{hs_host}:{hs_port}")
-
-hadoop_mapred2_jar_location = os.path.join(os.environ["HADOOP_COMMON_HOME"], "share", "hadoop", "mapreduce")
-hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
-
-exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
-exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/resourcemanager.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/resourcemanager.py
deleted file mode 100755
index 6a7eea7..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/resourcemanager.py
+++ /dev/null
@@ -1,289 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from resource_management.libraries.functions.decorator import retry
-from resource_management.core.resources.system import File, Execute
-from resource_management.core.source import Template
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
-from resource_management.libraries.providers.hdfs_resource import HdfsResourceProvider
-from resource_management import is_empty
-from resource_management import shell
-
-
-from yarn import yarn
-from service import service
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from setup_ranger_yarn import setup_ranger_yarn
-
-
-class Resourcemanager(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    service('resourcemanager', action='stop')
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name='resourcemanager')
-
-  def refreshqueues(self, env):
-    pass
-
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class ResourcemanagerWindows(Resourcemanager):
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    service('resourcemanager', action='start')
-
-  def status(self, env):
-    service('resourcemanager', action='status')
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    yarn_user = params.yarn_user
-
-    yarn_refresh_cmd = format("cmd /c yarn rmadmin -refreshNodes")
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=yarn_user,
-         mode=0644
-    )
-
-    if not params.update_exclude_file_only:
-      Execute(yarn_refresh_cmd, user=yarn_user)
-
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class ResourcemanagerDefault(Resourcemanager):
-  def get_component_name(self):
-    return "hadoop-yarn-resourcemanager"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade post-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-resourcemanager", params.version)
-
-  def start(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    if params.has_ranger_admin and params.is_supported_yarn_ranger:
-      setup_ranger_yarn() #Ranger Yarn Plugin related calls
-
-    # wait for active-dir and done-dir to be created by ATS if needed
-    if params.has_ats:
-      Logger.info("Verifying DFS directories where ATS stores time line data for active and completed applications.")
-      self.wait_for_dfs_directories_created(params.entity_groupfs_store_dir, params.entity_groupfs_active_dir)
-
-    service('resourcemanager', action='start')
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.resourcemanager_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.resourcemanager.principal",
-                           "yarn.resourcemanager.keytab",
-                           "yarn.resourcemanager.webapp.spnego-principal",
-                           "yarn.resourcemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.resourcemanager.keytab",
-                          "yarn.resourcemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations = {}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('yarn-site' not in security_params
-              or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
-              or 'yarn.resourcemanager.principal' not in security_params['yarn-site']
-              or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site']
-              or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal is not set properly."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.keytab'],
-                                security_params['yarn-site']['yarn.resourcemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def refreshqueues(self, env):
-    import params
-
-    self.configure(env)
-    env.set_params(params)
-
-    service('resourcemanager',
-            action='refreshQueues'
-    )
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    rm_kinit_cmd = params.rm_kinit_cmd
-    yarn_user = params.yarn_user
-    conf_dir = params.hadoop_conf_dir
-    user_group = params.user_group
-
-    yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=yarn_user,
-         group=user_group
-    )
-
-    if not params.update_exclude_file_only:
-      Execute(yarn_refresh_cmd,
-              environment={'PATH': params.execute_path},
-              user=yarn_user)
-
-
-  def wait_for_dfs_directories_created(self, *dirs):
-    import params
-
-    ignored_dfs_dirs = HdfsResourceProvider.get_ignored_resources_list(params.hdfs_resource_ignore_file)
-
-    if params.security_enabled:
-      Execute(params.rm_kinit_cmd,
-              user=params.yarn_user
-      )
-      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-        user=params.hdfs_user
-      )
-
-    for dir_path in dirs:
-      self.wait_for_dfs_directory_created(dir_path, ignored_dfs_dirs)
-
-
-  @retry(times=8, sleep_time=20, backoff_factor=1, err_class=Fail)
-  def wait_for_dfs_directory_created(self, dir_path, ignored_dfs_dirs):
-    import params
-
-    if not is_empty(dir_path):
-      dir_path = HdfsResourceProvider.parse_path(dir_path)
-
-      if dir_path in ignored_dfs_dirs:
-        Logger.info("Skipping DFS directory '" + dir_path + "' as it's marked to be ignored.")
-        return
-
-      Logger.info("Verifying if DFS directory '" + dir_path + "' exists.")
-
-      dir_exists = None
-
-      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
-        # checking via WebHDFS is much faster than executing "hdfs dfs -test"
-        util = WebHDFSUtil(params.hdfs_site, params.hdfs_user, params.security_enabled)
-        list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
-        dir_exists = ('FileStatus' in list_status)
-      else:
-        # fall back to the more expensive "hdfs dfs -test -d" check.
-        dfs_ret_code = shell.call(format("hdfs --config {hadoop_conf_dir} dfs -test -d " + dir_path), user=params.yarn_user)[0]
-        dir_exists = not dfs_ret_code #dfs -test -d returns 0 in case the dir exists
-
-      if not dir_exists:
-        raise Fail("DFS directory '" + dir_path + "' does not exist !")
-      else:
-        Logger.info("DFS directory '" + dir_path + "' exists.")
-
-  def get_log_folder(self):
-    import params
-    return params.yarn_log_dir
-
-  def get_user(self):
-    import params
-    return params.yarn_user
-
-if __name__ == "__main__":
-  Resourcemanager().execute()

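For reference, the directory wait above reduces to a bounded retry around a
WebHDFS GETFILESTATUS probe. A minimal standalone sketch, assuming an
unsecured WebHDFS endpoint and hypothetical host/path values (the real script
goes through Ambari's WebHDFSUtil and honors Kerberos):

from __future__ import print_function
import json
import time

try:
    from urllib.request import urlopen           # Python 3
    from urllib.error import HTTPError
except ImportError:
    from urllib2 import urlopen, HTTPError       # Python 2

def dfs_dir_exists(namenode_http, path):
    # GETFILESTATUS returns a FileStatus object for an existing path, 404 otherwise.
    url = "http://%s/webhdfs/v1%s?op=GETFILESTATUS" % (namenode_http, path)
    try:
        response = json.load(urlopen(url, timeout=5))
    except HTTPError as e:
        if e.code == 404:    # "not created yet", mirrors ignore_status_codes=['404']
            return False
        raise
    return "FileStatus" in response

def wait_for_dfs_dir(namenode_http, path, tries=8, sleep_time=20):
    # Same shape as the @retry decorator above: a fixed number of attempts
    # with a constant pause between them (backoff_factor=1).
    for attempt in range(tries):
        if dfs_dir_exists(namenode_http, path):
            print("DFS directory '%s' exists." % path)
            return
        if attempt < tries - 1:
            time.sleep(sleep_time)
    raise RuntimeError("DFS directory '%s' does not exist!" % path)

# wait_for_dfs_dir("namenode.example.com:50070", "/ats/done")
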
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service.py
deleted file mode 100755
index b1179b9..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service.py
+++ /dev/null
@@ -1,105 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-from resource_management.core.shell import as_user
-from resource_management.libraries.functions.show_logs import show_logs
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def service(componentName, action='start', serviceName='yarn'):
-  import status_params
-  if componentName in status_params.service_map:
-    service_name = status_params.service_map[componentName]
-    if action in ('start', 'stop'):
-      Service(service_name, action=action)
-    elif action == 'status':
-      check_windows_service_status(service_name)
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def service(componentName, action='start', serviceName='yarn'):
-  import params
-
-  if serviceName == 'mapreduce' and componentName == 'historyserver':
-    delete_pid_file = True
-    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
-    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
-    usr = params.mapred_user
-    log_dir = params.mapred_log_dir
-  else:
-    # !!! yarn-daemon.sh deletes the PID for us; if we remove it the script
-    # may not work correctly when stopping the service
-    delete_pid_file = False
-    daemon = format("{yarn_bin}/yarn-daemon.sh")
-    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{componentName}.pid")
-    usr = params.yarn_user
-    log_dir = params.yarn_log_dir
-
-  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}")
-
-  if action == 'start':
-    daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}")
-    check_process = as_user(format("ls {pid_file} && ps -p `cat {pid_file}`"), user=usr)
-
-    # Remove the pid file if its corresponding process is not running.
-    File(pid_file, action = "delete", not_if = check_process)
-
-    if componentName == 'timelineserver' and serviceName == 'yarn':
-      File(params.ats_leveldb_lock_file,
-         action = "delete",
-         only_if = format("ls {params.ats_leveldb_lock_file}"),
-         not_if = check_process,
-         ignore_failures = True
-      )
-
-    try:
-      # Attempt to start the process. Internally, this is skipped if the process is already running.
-      Execute(daemon_cmd, user = usr, not_if = check_process)
-
-      # Ensure that the process with the expected PID exists.
-      Execute(check_process,
-              not_if = check_process,
-              tries=5,
-              try_sleep=1,
-      )
-    except:
-      show_logs(log_dir, usr)
-      raise
-
-  elif action == 'stop':
-    daemon_cmd = format("{cmd} stop {componentName}")
-    try:
-      Execute(daemon_cmd, user=usr)
-    except:
-      show_logs(log_dir, usr)
-      raise
-
-    # !!! yarn-daemon doesn't need us to delete PIDs
-    if delete_pid_file:
-      File(pid_file, action="delete")
-
-  elif action == 'refreshQueues':
-    rm_kinit_cmd = params.rm_kinit_cmd
-    refresh_cmd = format("{rm_kinit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {yarn_container_bin}/yarn rmadmin -refreshQueues")
-    Execute(refresh_cmd, user=usr)

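The start path above leans on a single shell guard, "ls {pid_file} &&
ps -p `cat {pid_file}`", used both as the not_if condition and as the
post-start health check. A minimal sketch of the same pattern outside the
resource_management framework, with a hypothetical daemon command and paths:

from __future__ import print_function
import os
import subprocess

def process_alive(pid_file):
    # Mirrors the guard: the pid file must exist and "ps -p <pid>" must
    # exit 0, i.e. a process with that pid is still running.
    if not os.path.isfile(pid_file):
        return False
    with open(pid_file) as f:
        pid = f.read().strip()
    return bool(pid) and subprocess.call(["ps", "-p", pid]) == 0

def start_daemon(daemon_cmd, pid_file):
    if process_alive(pid_file):
        return                        # already running, nothing to do
    if os.path.isfile(pid_file):
        os.remove(pid_file)           # stale pid file from a dead process
    subprocess.check_call(daemon_cmd, shell=True)

# start_daemon("/usr/lib/hadoop-yarn/sbin/yarn-daemon.sh start resourcemanager",
#              "/var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid")
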
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service_check.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service_check.py
deleted file mode 100755
index daa8e7e..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service_check.py
+++ /dev/null
@@ -1,159 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and offers the same function set.
-import re
-import subprocess
-from ambari_commons import os_utils
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.get_user_call_output import get_user_call_output
-from resource_management.core.exceptions import Fail
-from resource_management.core.logger import Logger
-
-CURL_CONNECTION_TIMEOUT = '5'
-
-class ServiceCheck(Script):
-  def service_check(self, env):
-    pass
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class ServiceCheckWindows(ServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    yarn_exe = os_utils.quote_path(os.path.join(params.yarn_home, "bin", "yarn.cmd"))
-
-    run_yarn_check_cmd = "cmd /C %s node -list" % yarn_exe
-
-    component_type = 'rm'
-    if params.hadoop_ssl_enabled:
-      component_address = params.rm_webui_https_address
-    else:
-      component_address = params.rm_webui_address
-
-    temp_dir = os.path.join(os.path.dirname(params.hadoop_home), "temp")
-    validateStatusFileName = "validateYarnComponentStatusWindows.py"
-    validateStatusFilePath = os.path.join(temp_dir, validateStatusFileName)
-    python_executable = sys.executable
-    validateStatusCmd = "%s %s %s -p %s -s %s" % (python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
-
-    if params.security_enabled:
-      kinit_cmd = "%s -kt %s %s;" % (params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
-      smoke_cmd = kinit_cmd + ' ' + validateStatusCmd
-    else:
-      smoke_cmd = validateStatusCmd
-
-    File(validateStatusFilePath,
-         content=StaticFile(validateStatusFileName)
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            logoutput=True
-    )
-
-    Execute(run_yarn_check_cmd, logoutput=True)
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class ServiceCheckDefault(ServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    params.HdfsResource(format("/user/{smokeuser}"),
-                        type="directory",
-                        action="create_on_execute",
-                        owner=params.smokeuser,
-                        mode=params.smoke_hdfs_user_mode,
-                        )
-
-    if params.stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted_major):
-      path_to_distributed_shell_jar = format("{stack_root}/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar")
-    else:
-      path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
-
-    yarn_distributed_shell_check_params = ["yarn org.apache.hadoop.yarn.applications.distributedshell.Client",
-                                           "-shell_command", "ls", "-num_containers", "{number_of_nm}",
-                                           "-jar", "{path_to_distributed_shell_jar}", "-timeout", "300000",
-                                           "--queue", "{service_check_queue_name}"]
-    yarn_distributed_shell_check_cmd = format(" ".join(yarn_distributed_shell_check_params))
-
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
-      smoke_cmd = format("{kinit_cmd} {yarn_distributed_shell_check_cmd}")
-    else:
-      smoke_cmd = yarn_distributed_shell_check_cmd
-
-    return_code, out = shell.checked_call(smoke_cmd,
-                                          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                          user=params.smokeuser,
-                                          )
-
-    m = re.search("appTrackingUrl=(.*),\s", out)
-    if m is None:
-      raise Fail("Could not find appTrackingUrl in the distributed shell output.")
-    app_url = m.group(1)
-
-    application_name = None
-    for item in str(app_url).split('/'):
-      if "application" in item:
-        application_name = item
-    if application_name is None:
-      raise Fail(format("Could not parse an application id from {app_url}"))
-
-    for rm_webapp_address in params.rm_webapp_addresses_list:
-      info_app_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
-
-      get_app_info_cmd = "curl --negotiate -u : -ksL --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
-
-      return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
-                                            user=params.smokeuser,
-                                            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                            )
-
-      # Handle HDP<2.2.8.1 where RM doesn't do automatic redirection from standby to active
-      if stdout.startswith("This is standby RM. Redirecting to the current active RM:"):
-        Logger.info(format("Skipped checking of {rm_webapp_address} since returned '{stdout}'"))
-        continue
-
-      try:
-        json_response = json.loads(stdout)
-      except Exception as e:
-        raise Fail(format("Response from YARN API was not a valid JSON. Response: {stdout}"))
-
-      if json_response is None or 'app' not in json_response or \
-              'state' not in json_response['app'] or 'finalStatus' not in json_response['app']:
-        raise Fail("Application " + app_url + " returns invalid data.")
-
-      if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
-        raise Fail("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
-
-
-if __name__ == "__main__":
-  ServiceCheck().execute()

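The default service check above submits a distributed-shell job, pulls the
application id out of appTrackingUrl, and then confirms FINISHED/SUCCEEDED
against the RM's /ws/v1/cluster/apps endpoint. A standalone sketch of those
last two steps, assuming an unsecured RM web address (the script itself uses
"curl --negotiate" so Kerberos also works):

from __future__ import print_function
import json
import re

try:
    from urllib.request import urlopen   # Python 3
except ImportError:
    from urllib2 import urlopen          # Python 2

def extract_application_id(client_output):
    # The client logs a tracking URL whose last path element is the id,
    # e.g. ".../proxy/application_1490000000000_0001/".
    m = re.search(r"appTrackingUrl=(.*?),\s", client_output)
    if m is None:
        raise RuntimeError("no appTrackingUrl in client output")
    for part in m.group(1).split("/"):
        if part.startswith("application_"):
            return part
    raise RuntimeError("no application id in tracking URL")

def check_app_succeeded(rm_webapp_address, application_id):
    url = "http://%s/ws/v1/cluster/apps/%s" % (rm_webapp_address, application_id)
    app = json.load(urlopen(url, timeout=5))["app"]
    if app["state"] != "FINISHED" or app["finalStatus"] != "SUCCEEDED":
        raise RuntimeError("application ended as %s/%s"
                           % (app["state"], app["finalStatus"]))

# check_app_succeeded("rm.example.com:8088", "application_1490000000000_0001")
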
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/setup_ranger_yarn.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/setup_ranger_yarn.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/setup_ranger_yarn.py
deleted file mode 100755
index 6ea7f82..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/setup_ranger_yarn.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-    http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from resource_management.core.logger import Logger
-
-def setup_ranger_yarn():
-  import params
-
-  if params.has_ranger_admin:
-
-    from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
-
-    if params.retryAble:
-      Logger.info("YARN: Setup ranger: command retry enables thus retrying if ranger admin is down !")
-    else:
-      Logger.info("YARN: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
-
-    if params.xml_configurations_supported and params.enable_ranger_yarn and params.xa_audit_hdfs_is_enabled:
-      params.HdfsResource("/ranger/audit",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user,
-                         group=params.hdfs_user,
-                         mode=0755,
-                         recursive_chmod=True
-      )
-      params.HdfsResource("/ranger/audit/yarn",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.yarn_user,
-                         group=params.yarn_user,
-                         mode=0700,
-                         recursive_chmod=True
-      )
-      params.HdfsResource(None, action="execute")
-
-    setup_ranger_plugin('hadoop-yarn-resourcemanager', 'yarn', params.previous_jdbc_jar,
-                        params.downloaded_custom_connector, params.driver_curl_source,
-                        params.driver_curl_target, params.java64_home,
-                        params.repo_name, params.yarn_ranger_plugin_repo,
-                        params.ranger_env, params.ranger_plugin_properties,
-                        params.policy_user, params.policymgr_mgr_url,
-                        params.enable_ranger_yarn, conf_dict=params.hadoop_conf_dir,
-                        component_user=params.yarn_user, component_group=params.user_group, cache_service_list=['yarn'],
-                        plugin_audit_properties=params.config['configurations']['ranger-yarn-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-yarn-audit'],
-                        plugin_security_properties=params.config['configurations']['ranger-yarn-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-yarn-security'],
-                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-yarn-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-yarn-policymgr-ssl'],
-                        component_list=['hadoop-yarn-resourcemanager'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
-                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
-                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        api_version = 'v2', skip_if_rangeradmin_down= not params.retryAble,
-                        is_security_enabled = params.security_enabled,
-                        is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
-                        component_user_principal=params.rm_principal_name if params.security_enabled else None,
-                        component_user_keytab=params.rm_keytab if params.security_enabled else None
-      )
-  else:
-    Logger.info('Ranger admin not installed')

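When HDFS audit is enabled, the setup above pre-creates /ranger/audit
(mode 0755, owned by the HDFS user) and /ranger/audit/yarn (mode 0700,
owned by the YARN user) before wiring up the plugin. A rough equivalent
expressed as plain "hdfs dfs" calls instead of Ambari's HdfsResource,
illustrative only and assuming an hdfs client on the PATH:

import subprocess

def hdfs_dir(path, mode, owner):
    # Create the directory if missing, then force mode and ownership
    # recursively, as HdfsResource does with recursive_chmod=True.
    subprocess.check_call(["hdfs", "dfs", "-mkdir", "-p", path])
    subprocess.check_call(["hdfs", "dfs", "-chmod", "-R", mode, path])
    subprocess.check_call(["hdfs", "dfs", "-chown", "-R", owner, path])

# hdfs_dir("/ranger/audit", "755", "hdfs:hdfs")
# hdfs_dir("/ranger/audit/yarn", "700", "yarn:yarn")
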
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/status_params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/status_params.py
deleted file mode 100755
index c2e9d92..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/status_params.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.script.script import Script
-from resource_management.libraries import functions
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.default import default
-from ambari_commons import OSCheck
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-if OSCheck.is_windows_family():
-  resourcemanager_win_service_name = 'resourcemanager'
-  nodemanager_win_service_name = 'nodemanager'
-  historyserver_win_service_name = 'historyserver'
-  timelineserver_win_service_name = 'timelineserver'
-
-  service_map = {
-    'resourcemanager' : resourcemanager_win_service_name,
-    'nodemanager' : nodemanager_win_service_name,
-    'historyserver' : historyserver_win_service_name,
-    'timelineserver' : timelineserver_win_service_name
-  }
-else:
-  mapred_user = config['configurations']['mapred-env']['mapred_user']
-  yarn_user = config['configurations']['yarn-env']['yarn_user']
-  yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
-  mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
-  yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
-  mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
-
-  resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
-  nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
-  yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
-  yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
-  mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
-
-  hadoop_conf_dir = functions.conf_select.get_hadoop_conf_dir()
-
-  hostname = config['hostname']
-  kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-  security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file