Posted to commits@ambari.apache.org by nc...@apache.org on 2016/12/16 22:02:36 UTC
[28/51] [abbrv] ambari git commit: AMBARI-19220. Fix version of HDFS and YARN used by HDP 3.0 (alejandro)
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/service_check.py
deleted file mode 100644
index 981f002..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/service_check.py
+++ /dev/null
@@ -1,152 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.core.shell import as_user
-from ambari_commons.os_family_impl import OsFamilyImpl
-from ambari_commons import OSConst
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.libraries import functions
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
-from resource_management.core.logger import Logger
-from resource_management.core.source import StaticFile
-from resource_management.core.resources.system import Execute, File
-
-
-class HdfsServiceCheck(Script):
- pass
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class HdfsServiceCheckDefault(HdfsServiceCheck):
- def service_check(self, env):
- import params
-
- env.set_params(params)
- unique = functions.get_unique_id_and_date()
- dir = params.hdfs_tmp_dir
- tmp_file = format("{dir}/{unique}")
-
- safemode_command = format("dfsadmin -fs {namenode_address} -safemode get | grep OFF")
-
- if params.security_enabled:
- Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
- user=params.hdfs_user
- )
- ExecuteHadoop(safemode_command,
- user=params.hdfs_user,
- logoutput=True,
- conf_dir=params.hadoop_conf_dir,
- try_sleep=3,
- tries=20,
- bin_dir=params.hadoop_bin_dir
- )
- params.HdfsResource(dir,
- type="directory",
- action="create_on_execute",
- mode=0777
- )
- params.HdfsResource(tmp_file,
- type="file",
- action="delete_on_execute",
- )
-
- params.HdfsResource(tmp_file,
- type="file",
- source="/etc/passwd",
- action="create_on_execute"
- )
- params.HdfsResource(None, action="execute")
-
- if params.has_journalnode_hosts:
- if params.security_enabled:
- for host in params.journalnode_hosts:
- if params.https_only:
- uri = format("https://{host}:{journalnode_port}")
- else:
- uri = format("http://{host}:{journalnode_port}")
- response, errmsg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab,
- params.smokeuser_principal, uri, "jn_service_check",
- params.kinit_path_local, False, None, params.smoke_user)
- if not response:
- Logger.error("Cannot access WEB UI on: {0}. Error : {1}", uri, errmsg)
- return 1
- else:
- journalnode_port = params.journalnode_port
- checkWebUIFileName = "checkWebUI.py"
- checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
- comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
- checkWebUICmd = format("ambari-python-wrap {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port} -s {https_only}")
- File(checkWebUIFilePath,
- content=StaticFile(checkWebUIFileName),
- mode=0775)
-
- Execute(checkWebUICmd,
- logoutput=True,
- try_sleep=3,
- tries=5,
- user=params.smoke_user
- )
-
- if params.is_namenode_master:
- if params.has_zkfc_hosts:
- pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
- pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
- check_zkfc_process_cmd = as_user(format(
- "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.hdfs_user)
- Execute(check_zkfc_process_cmd,
- logoutput=True,
- try_sleep=3,
- tries=5
- )
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class HdfsServiceCheckWindows(HdfsServiceCheck):
- def service_check(self, env):
- import params
- env.set_params(params)
-
- unique = functions.get_unique_id_and_date()
-
- #Hadoop uses POSIX-style paths, separator is always /
- dir = params.hdfs_tmp_dir
- tmp_file = dir + '/' + unique
-
- #commands for execution
- hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd"))
- create_dir_cmd = "%s fs -mkdir %s" % (hadoop_cmd, dir)
- own_dir = "%s fs -chmod 777 %s" % (hadoop_cmd, dir)
- test_dir_exists = "%s fs -test -e %s" % (hadoop_cmd, dir)
- cleanup_cmd = "%s fs -rm %s" % (hadoop_cmd, tmp_file)
- create_file_cmd = "%s fs -put %s %s" % (hadoop_cmd, os.path.join(params.hadoop_conf_dir, "core-site.xml"), tmp_file)
- test_cmd = "%s fs -test -e %s" % (hadoop_cmd, tmp_file)
-
- hdfs_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hdfs.cmd"))
- safemode_command = "%s dfsadmin -safemode get | %s OFF" % (hdfs_cmd, params.grep_exe)
-
- Execute(safemode_command, logoutput=True, try_sleep=3, tries=20)
- Execute(create_dir_cmd, user=params.hdfs_user,logoutput=True, ignore_failures=True)
- Execute(own_dir, user=params.hdfs_user,logoutput=True)
- Execute(test_dir_exists, user=params.hdfs_user,logoutput=True)
- Execute(create_file_cmd, user=params.hdfs_user,logoutput=True)
- Execute(test_cmd, user=params.hdfs_user,logoutput=True)
- Execute(cleanup_cmd, user=params.hdfs_user,logoutput=True)
-
-if __name__ == "__main__":
- HdfsServiceCheck().execute()
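The service check above gates everything on NameNode safemode: ExecuteHadoop retries "dfsadmin -fs {namenode_address} -safemode get | grep OFF" up to 20 times with a 3-second sleep before exercising HDFS through HdfsResource. A minimal standalone sketch of that retry-until-OFF gate, written against plain subprocess rather than Ambari's resource_management framework (the hdfs binary on PATH, the retry count and the sleep are illustrative assumptions):

import subprocess
import time

def wait_for_safemode_off(tries=20, try_sleep=3):
    # Same idea as ExecuteHadoop(safemode_command, tries=20, try_sleep=3):
    # keep polling "hdfs dfsadmin -safemode get" until the output contains OFF.
    for attempt in range(tries):
        try:
            out = subprocess.check_output(["hdfs", "dfsadmin", "-safemode", "get"])
        except subprocess.CalledProcessError:
            out = b""
        if b"OFF" in out:
            return True
        time.sleep(try_sleep)
    return False

if __name__ == "__main__":
    if not wait_for_safemode_off():
        raise SystemExit("NameNode did not leave safemode")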
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/setup_ranger_hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/setup_ranger_hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/setup_ranger_hdfs.py
deleted file mode 100644
index e3aff9d..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/setup_ranger_hdfs.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.constants import Direction
-from resource_management.libraries.functions.format import format
-
-
-def setup_ranger_hdfs(upgrade_type=None):
- import params
-
- if params.has_ranger_admin:
-
-
- stack_version = None
-
- if upgrade_type is not None:
- stack_version = params.version
-
- if params.retryAble:
- Logger.info("HDFS: Setup ranger: command retry enables thus retrying if ranger admin is down !")
- else:
- Logger.info("HDFS: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
-
-
- if params.xml_configurations_supported:
- from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
- api_version=None
- if params.stack_supports_ranger_kerberos:
- api_version='v2'
- setup_ranger_plugin('hadoop-client', 'hdfs', params.previous_jdbc_jar,
- params.downloaded_custom_connector, params.driver_curl_source,
- params.driver_curl_target, params.java_home,
- params.repo_name, params.hdfs_ranger_plugin_repo,
- params.ranger_env, params.ranger_plugin_properties,
- params.policy_user, params.policymgr_mgr_url,
- params.enable_ranger_hdfs, conf_dict=params.hadoop_conf_dir,
- component_user=params.hdfs_user, component_group=params.user_group, cache_service_list=['hdfs'],
- plugin_audit_properties=params.config['configurations']['ranger-hdfs-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hdfs-audit'],
- plugin_security_properties=params.config['configurations']['ranger-hdfs-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hdfs-security'],
- plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hdfs-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hdfs-policymgr-ssl'],
- component_list=['hadoop-client'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
- credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
- ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
- api_version=api_version ,stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble,
- is_security_enabled = params.security_enabled,
- is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
- component_user_principal=params.nn_principal_name if params.security_enabled else None,
- component_user_keytab=params.nn_keytab if params.security_enabled else None)
- else:
- from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
-
- setup_ranger_plugin('hadoop-client', 'hdfs', params.previous_jdbc_jar,
- params.downloaded_custom_connector, params.driver_curl_source,
- params.driver_curl_target, params.java_home,
- params.repo_name, params.hdfs_ranger_plugin_repo,
- params.ranger_env, params.ranger_plugin_properties,
- params.policy_user, params.policymgr_mgr_url,
- params.enable_ranger_hdfs, conf_dict=params.hadoop_conf_dir,
- component_user=params.hdfs_user, component_group=params.user_group, cache_service_list=['hdfs'],
- plugin_audit_properties=params.config['configurations']['ranger-hdfs-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hdfs-audit'],
- plugin_security_properties=params.config['configurations']['ranger-hdfs-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hdfs-security'],
- plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hdfs-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hdfs-policymgr-ssl'],
- component_list=['hadoop-client'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
- credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
- ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
- stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
-
- if stack_version and params.upgrade_direction == Direction.UPGRADE:
- # when upgrading to a stack that has the REMOVE_RANGER_HDFS_PLUGIN_ENV feature, this env file must be removed
- if check_stack_feature(StackFeature.REMOVE_RANGER_HDFS_PLUGIN_ENV, stack_version):
- source_file = os.path.join(params.hadoop_conf_dir, 'set-hdfs-plugin-env.sh')
- target_file = source_file + ".bak"
- Execute(("mv", source_file, target_file), sudo=True, only_if=format("test -f {source_file}"))
- else:
- Logger.info('Ranger admin not installed')
-
-def create_ranger_audit_hdfs_directories():
- import params
-
- if params.has_ranger_admin:
- if params.xml_configurations_supported and params.enable_ranger_hdfs and params.xa_audit_hdfs_is_enabled:
- params.HdfsResource("/ranger/audit",
- type="directory",
- action="create_on_execute",
- owner=params.hdfs_user,
- group=params.hdfs_user,
- mode=0755,
- recursive_chmod=True,
- )
- params.HdfsResource("/ranger/audit/hdfs",
- type="directory",
- action="create_on_execute",
- owner=params.hdfs_user,
- group=params.hdfs_user,
- mode=0700,
- recursive_chmod=True,
- )
- params.HdfsResource(None, action="execute")
- else:
- Logger.info('Ranger admin not installed')
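One detail worth calling out in setup_ranger_hdfs above: when an upgrade (Direction.UPGRADE) reaches a stack that carries the REMOVE_RANGER_HDFS_PLUGIN_ENV feature, set-hdfs-plugin-env.sh is renamed to a .bak file, guarded by "test -f" and executed via sudo through the Execute resource. A plain-Python sketch of that guarded rename, outside the Ambari framework (the conf dir default is illustrative):

import os

def retire_plugin_env(hadoop_conf_dir="/etc/hadoop/conf"):
    # Mirrors: Execute(("mv", source_file, target_file), sudo=True,
    #                  only_if="test -f {source_file}")
    source_file = os.path.join(hadoop_conf_dir, "set-hdfs-plugin-env.sh")
    target_file = source_file + ".bak"
    if os.path.isfile(source_file):  # the only_if guard
        os.rename(source_file, target_file)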
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/snamenode.py
deleted file mode 100644
index 0f1f438..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/snamenode.py
+++ /dev/null
@@ -1,155 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.security_commons import build_expectations, \
- cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
- FILE_TYPE_XML
-
-from hdfs_snamenode import snamenode
-from hdfs import hdfs
-from ambari_commons.os_family_impl import OsFamilyImpl
-from ambari_commons import OSConst
-
-from resource_management.core.logger import Logger
-
-class SNameNode(Script):
- def install(self, env):
- import params
- env.set_params(params)
- self.install_packages(env)
-
- def configure(self, env):
- import params
- env.set_params(params)
- hdfs("secondarynamenode")
- snamenode(action="configure")
-
- def start(self, env, upgrade_type=None):
- import params
- env.set_params(params)
- self.configure(env)
- snamenode(action="start")
-
- def stop(self, env, upgrade_type=None):
- import params
- env.set_params(params)
- snamenode(action="stop")
-
- def status(self, env):
- import status_params
- env.set_params(status_params)
- snamenode(action="status")
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class SNameNodeDefault(SNameNode):
-
- def get_component_name(self):
- return "hadoop-hdfs-secondarynamenode"
-
- def pre_upgrade_restart(self, env, upgrade_type=None):
- Logger.info("Executing Stack Upgrade pre-restart")
- import params
- env.set_params(params)
-
- if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
- conf_select.select(params.stack_name, "hadoop", params.version)
- stack_select.select("hadoop-hdfs-secondarynamenode", params.version)
-
- def security_status(self, env):
- import status_params
-
- env.set_params(status_params)
- props_value_check = {"hadoop.security.authentication": "kerberos",
- "hadoop.security.authorization": "true"}
- props_empty_check = ["hadoop.security.auth_to_local"]
- props_read_check = None
- core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
- props_read_check)
- props_value_check = None
- props_empty_check = ['dfs.secondary.namenode.kerberos.internal.spnego.principal',
- 'dfs.secondary.namenode.keytab.file',
- 'dfs.secondary.namenode.kerberos.principal']
- props_read_check = ['dfs.secondary.namenode.keytab.file']
- hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
- props_read_check)
-
- hdfs_expectations = {}
- hdfs_expectations.update(core_site_expectations)
- hdfs_expectations.update(hdfs_site_expectations)
-
- security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
- {'core-site.xml': FILE_TYPE_XML,
- 'hdfs-site.xml': FILE_TYPE_XML})
-
- if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
- security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
- result_issues = validate_security_config_properties(security_params, hdfs_expectations)
- if not result_issues: # If all validations passed successfully
- try:
- # Double check the dict before calling execute
- if ('hdfs-site' not in security_params or
- 'dfs.secondary.namenode.keytab.file' not in security_params['hdfs-site'] or
- 'dfs.secondary.namenode.kerberos.principal' not in security_params['hdfs-site']):
- self.put_structured_out({"securityState": "UNSECURED"})
- self.put_structured_out(
- {"securityIssuesFound": "Keytab file or principal are not set property."})
- return
-
- cached_kinit_executor(status_params.kinit_path_local,
- status_params.hdfs_user,
- security_params['hdfs-site']['dfs.secondary.namenode.keytab.file'],
- security_params['hdfs-site'][
- 'dfs.secondary.namenode.kerberos.principal'],
- status_params.hostname,
- status_params.tmp_dir)
- self.put_structured_out({"securityState": "SECURED_KERBEROS"})
- except Exception as e:
- self.put_structured_out({"securityState": "ERROR"})
- self.put_structured_out({"securityStateErrorInfo": str(e)})
- else:
- issues = []
- for cf in result_issues:
- issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
- self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
- self.put_structured_out({"securityState": "UNSECURED"})
- else:
- self.put_structured_out({"securityState": "UNSECURED"})
-
- def get_log_folder(self):
- import params
- return params.hdfs_log_dir
-
- def get_user(self):
- import params
- return params.hdfs_user
-
- def get_pid_files(self):
- import status_params
- return [status_params.snamenode_pid_file]
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class SNameNodeWindows(SNameNode):
- pass
-
-if __name__ == "__main__":
- SNameNode().execute()
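security_status above boils down to a declarative check: per config site, some properties must hold exact values (e.g. hadoop.security.authentication = kerberos) and some must merely be present and non-empty (e.g. hadoop.security.auth_to_local), and every violation is reported back in securityIssuesFound. A simplified pure-Python approximation of that validation, independent of the security_commons helpers (the dict shapes are assumptions made for illustration):

def validate_site(site_props, value_checks, non_empty_checks):
    # Return a list of human-readable issues; an empty list means the site passes.
    issues = []
    for prop, expected in (value_checks or {}).items():
        if site_props.get(prop) != expected:
            issues.append("%s should be %r but is %r" % (prop, expected, site_props.get(prop)))
    for prop in (non_empty_checks or []):
        if not site_props.get(prop):
            issues.append("%s must be set and non-empty" % prop)
    return issues

core_site = {"hadoop.security.authentication": "kerberos",
             "hadoop.security.authorization": "true",
             "hadoop.security.auth_to_local": "DEFAULT"}
print(validate_site(core_site,
                    {"hadoop.security.authentication": "kerberos",
                     "hadoop.security.authorization": "true"},
                    ["hadoop.security.auth_to_local"]))  # -> []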
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/status_params.py
deleted file mode 100644
index 153f9a6..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/status_params.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons import OSCheck
-
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.script.script import Script
-
-config = Script.get_config()
-
-if OSCheck.is_windows_family():
- namenode_win_service_name = "namenode"
- datanode_win_service_name = "datanode"
- snamenode_win_service_name = "secondarynamenode"
- journalnode_win_service_name = "journalnode"
- zkfc_win_service_name = "zkfc"
-else:
- hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
- hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
- hadoop_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
- datanode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
- namenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
- snamenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
- journalnode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
- zkfc_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
- nfsgateway_pid_file = format("{hadoop_pid_dir_prefix}/root/hadoop_privileged_nfs3.pid")
-
- # Security related/required params
- hostname = config['hostname']
- security_enabled = config['configurations']['cluster-env']['security_enabled']
- hdfs_user_principal = config['configurations']['hadoop-env']['hdfs_principal_name']
- hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-
- hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-
- kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
- tmp_dir = Script.get_tmp_dir()
-
-stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
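status_params.py derives every daemon's pid file from one convention, {hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-<component>.pid, with the privileged NFS gateway as the root-owned exception. A tiny sketch of that convention; the defaults below are illustrative, since Ambari reads both values from hadoop-env:

import os

def hadoop_pid_file(component, hdfs_user="hdfs", pid_dir_prefix="/var/run/hadoop"):
    # e.g. /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid
    return os.path.join(pid_dir_prefix, hdfs_user,
                        "hadoop-%s-%s.pid" % (hdfs_user, component))

for component in ("namenode", "datanode", "secondarynamenode", "journalnode", "zkfc"):
    print(hadoop_pid_file(component))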
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/utils.py
deleted file mode 100644
index f76935a..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/utils.py
+++ /dev/null
@@ -1,384 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-import re
-import urllib2
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same set of functions.
-
-from resource_management.core.resources.system import Directory, File, Execute
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions import check_process_status
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.core import shell
-from resource_management.core.shell import as_user, as_sudo
-from resource_management.core.exceptions import ComponentIsNotRunning
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.core.exceptions import Fail
-from resource_management.libraries.functions.namenode_ha_utils import get_namenode_states
-from resource_management.libraries.functions.show_logs import show_logs
-from resource_management.libraries.script.script import Script
-from ambari_commons.inet_utils import ensure_ssl_using_protocol
-from zkfc_slave import ZkfcSlaveDefault
-
-ensure_ssl_using_protocol(Script.get_force_https_protocol())
-
-def safe_zkfc_op(action, env):
- """
- Idempotent operation on the zkfc process to either start or stop it.
- :param action: start or stop
- :param env: environment
- """
- Logger.info("Performing action {0} on zkfc.".format(action))
- zkfc = None
- if action == "start":
- try:
- ZkfcSlaveDefault.status_static(env)
- except ComponentIsNotRunning:
- ZkfcSlaveDefault.start_static(env)
-
- if action == "stop":
- try:
- ZkfcSlaveDefault.status_static(env)
- except ComponentIsNotRunning:
- pass
- else:
- ZkfcSlaveDefault.stop_static(env)
-
-def initiate_safe_zkfc_failover():
- """
- If this is the active namenode, initiate a safe failover and wait for it to become the standby.
-
- If an error occurs, force a failover by killing zkfc on this host. In that case, ZKFC will also have to be
- started manually during the Restart.
- """
- import params
-
- # Must kinit before running the HDFS command
- if params.security_enabled:
- Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
- user = params.hdfs_user)
-
- active_namenode_id = None
- standby_namenode_id = None
- active_namenodes, standby_namenodes, unknown_namenodes = get_namenode_states(params.hdfs_site, params.security_enabled, params.hdfs_user)
- if active_namenodes:
- active_namenode_id = active_namenodes[0][0]
- if standby_namenodes:
- standby_namenode_id = standby_namenodes[0][0]
-
- if active_namenode_id:
- Logger.info(format("Active NameNode id: {active_namenode_id}"))
- if standby_namenode_id:
- Logger.info(format("Standby NameNode id: {standby_namenode_id}"))
- if unknown_namenodes:
- for unknown_namenode in unknown_namenodes:
- Logger.info("NameNode HA state for {0} is unknown".format(unknown_namenode[0]))
-
- if params.namenode_id == active_namenode_id and params.other_namenode_id == standby_namenode_id:
- # Failover if this NameNode is active and other NameNode is up and in standby (i.e. ready to become active on failover)
- Logger.info(format("NameNode {namenode_id} is active and NameNode {other_namenode_id} is in standby"))
-
- failover_command = format("hdfs haadmin -ns {dfs_ha_nameservices} -failover {namenode_id} {other_namenode_id}")
- check_standby_cmd = format("hdfs haadmin -ns {dfs_ha_nameservices} -getServiceState {namenode_id} | grep standby")
-
- msg = "Rolling Upgrade - Initiating a ZKFC failover on active NameNode host {0}.".format(params.hostname)
- Logger.info(msg)
- code, out = shell.call(failover_command, user=params.hdfs_user, logoutput=True)
- Logger.info(format("Rolling Upgrade - failover command returned {code}"))
- wait_for_standby = False
-
- if code == 0:
- wait_for_standby = True
- else:
- # Try to kill ZKFC manually
- was_zkfc_killed = kill_zkfc(params.hdfs_user)
- code, out = shell.call(check_standby_cmd, user=params.hdfs_user, logoutput=True)
- Logger.info(format("Rolling Upgrade - check for standby returned {code}"))
- if code == 255 and out:
- Logger.info("Rolling Upgrade - NameNode is already down.")
- else:
- if was_zkfc_killed:
- # Only mandate that this be the standby namenode if ZKFC was indeed killed to initiate a failover.
- wait_for_standby = True
-
- if wait_for_standby:
- Logger.info("Waiting for this NameNode to become the standby one.")
- Execute(check_standby_cmd,
- user=params.hdfs_user,
- tries=50,
- try_sleep=6,
- logoutput=True)
- else:
- msg = "Rolling Upgrade - Skipping ZKFC failover on NameNode host {0}.".format(params.hostname)
- Logger.info(msg)
-
-def kill_zkfc(zkfc_user):
- """
- There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
- Option 1. Kill zkfc on primary namenode provided that the secondary is up and has zkfc running on it.
- Option 2. Silent failover
- :param zkfc_user: User that started the ZKFC process.
- :return: True if ZKFC was killed; otherwise, False.
- """
- import params
- if params.dfs_ha_enabled:
- if params.zkfc_pid_file:
- check_process = as_user(format("ls {zkfc_pid_file} > /dev/null 2>&1 && ps -p `cat {zkfc_pid_file}` > /dev/null 2>&1"), user=zkfc_user)
- code, out = shell.call(check_process)
- if code == 0:
- Logger.debug("ZKFC is running and will be killed.")
- kill_command = format("kill -15 `cat {zkfc_pid_file}`")
- Execute(kill_command,
- user=zkfc_user
- )
- File(params.zkfc_pid_file,
- action = "delete",
- )
- return True
- return False
-
-def service(action=None, name=None, user=None, options="", create_pid_dir=False,
- create_log_dir=False):
- """
- :param action: Either "start" or "stop"
- :param name: Component name, e.g., "namenode", "datanode", "secondarynamenode", "zkfc"
- :param user: User to run the command as
- :param options: Additional options to pass to command as a string
- :param create_pid_dir: Create PID directory
- :param create_log_dir: Create log file directory
- """
- import params
-
- options = options if options else ""
- pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
- pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
- hadoop_env_exports = {
- 'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
- }
- log_dir = format("{hdfs_log_dir_prefix}/{user}")
-
- # NFS GATEWAY is always started by root using jsvc due to rpcbind bugs
- # on Linux such as CentOS6.2. https://bugzilla.redhat.com/show_bug.cgi?id=731542
- if name == "nfs3" :
- pid_file = format("{pid_dir}/hadoop_privileged_nfs3.pid")
- custom_export = {
- 'HADOOP_PRIVILEGED_NFS_USER': params.hdfs_user,
- 'HADOOP_PRIVILEGED_NFS_PID_DIR': pid_dir,
- 'HADOOP_PRIVILEGED_NFS_LOG_DIR': log_dir
- }
- hadoop_env_exports.update(custom_export)
-
- process_id_exists_command = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file])
-
- # On STOP, directories shouldn't be created,
- # since stop still uses the old dirs (which were created during the previous start)
- if action != "stop":
- if name == "nfs3":
- Directory(params.hadoop_pid_dir_prefix,
- mode=0755,
- owner=params.root_user,
- group=params.root_group
- )
- else:
- Directory(params.hadoop_pid_dir_prefix,
- mode=0755,
- owner=params.hdfs_user,
- group=params.user_group
- )
- if create_pid_dir:
- Directory(pid_dir,
- owner=user,
- group=params.user_group,
- create_parents = True)
- if create_log_dir:
- if name == "nfs3":
- Directory(log_dir,
- mode=0775,
- owner=params.root_user,
- group=params.user_group)
- else:
- Directory(log_dir,
- owner=user,
- group=params.user_group,
- create_parents = True)
-
- if params.security_enabled and name == "datanode":
- ## The directory where pid files are stored in the secure data environment.
- hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
- hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
-
- # From the stack version that supports DATANODE_NON_ROOT onward, we may start the datanode as non-root even in a secure cluster
- if not (params.stack_version_formatted and check_stack_feature(StackFeature.DATANODE_NON_ROOT, params.stack_version_formatted)) or params.secure_dn_ports_are_in_use:
- user = "root"
- pid_file = format(
- "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
-
- if action == 'stop' and (params.stack_version_formatted and check_stack_feature(StackFeature.DATANODE_NON_ROOT, params.stack_version_formatted)) and \
- os.path.isfile(hadoop_secure_dn_pid_file):
- # Special handling is needed for the case when a non-root secure DN is configured
- # and then restarted to pick up new configs; otherwise we would not be able
- # to stop the running instance.
- user = "root"
-
- try:
- check_process_status(hadoop_secure_dn_pid_file)
-
- custom_export = {
- 'HADOOP_SECURE_DN_USER': params.hdfs_user
- }
- hadoop_env_exports.update(custom_export)
-
- except ComponentIsNotRunning:
- pass
-
- hadoop_daemon = format("{hadoop_bin}/hadoop-daemon.sh")
-
- if user == "root":
- cmd = [hadoop_daemon, "--config", params.hadoop_conf_dir, action, name]
- if options:
- cmd += [options, ]
- daemon_cmd = as_sudo(cmd)
- else:
- cmd = format("{ulimit_cmd} {hadoop_daemon} --config {hadoop_conf_dir} {action} {name}")
- if options:
- cmd += " " + options
- daemon_cmd = as_user(cmd, user)
-
- if action == "start":
- # remove pid file from dead process
- File(pid_file, action="delete", not_if=process_id_exists_command)
-
- try:
- Execute(daemon_cmd, not_if=process_id_exists_command, environment=hadoop_env_exports)
- except:
- show_logs(log_dir, user)
- raise
- elif action == "stop":
- try:
- Execute(daemon_cmd, only_if=process_id_exists_command, environment=hadoop_env_exports)
- except:
- show_logs(log_dir, user)
- raise
- File(pid_file, action="delete")
-
-def get_jmx_data(nn_address, modeler_type, metric, encrypted=False, security_enabled=False):
- """
- :param nn_address: NameNode address, e.g., host:port; ** MAY ** already be preceded with "http://" or "https://".
- If not, the encrypted param determines the scheme.
- :param modeler_type: Modeler type to query using startswith function
- :param metric: Metric to return
- :return: Return an object representation of the metric, or None if it does not exist
- """
- if not nn_address or not modeler_type or not metric:
- return None
-
- nn_address = nn_address.strip()
- if not nn_address.startswith("http"):
- nn_address = ("https://" if encrypted else "http://") + nn_address
- if not nn_address.endswith("/"):
- nn_address = nn_address + "/"
-
- nn_address = nn_address + "jmx"
- Logger.info("Retrieve modeler: %s, metric: %s from JMX endpoint %s" % (modeler_type, metric, nn_address))
-
- if security_enabled:
- import params
- data, error_msg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab, params.smokeuser_principal, nn_address,
- "jn_upgrade", params.kinit_path_local, False, None, params.smoke_user)
- else:
- data = urllib2.urlopen(nn_address).read()
- my_data = None
- if data:
- data_dict = json.loads(data)
- if data_dict:
- for el in data_dict['beans']:
- if el is not None and el['modelerType'] is not None and el['modelerType'].startswith(modeler_type):
- if metric in el:
- my_data = el[metric]
- if my_data:
- my_data = json.loads(str(my_data))
- break
- return my_data
-
-def get_port(address):
- """
- Extracts the port from an address like 0.0.0.0:1019
- """
- if address is None:
- return None
- m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
- if m is not None and len(m.groups()) >= 2:
- return int(m.group(2))
- else:
- return None
-
-
-def is_secure_port(port):
- """
- Returns True if the port is privileged (root-owned) on *nix systems
- """
- if port is not None:
- return port < 1024
- else:
- return False
-
-def is_previous_fs_image():
- """
- Return True if there is a "previous" folder in any of the HDFS NameNode name directories.
- """
- import params
- if params.dfs_name_dir:
- nn_name_dirs = params.dfs_name_dir.split(',')
- for nn_dir in nn_name_dirs:
- prev_dir = os.path.join(nn_dir, "previous")
- if os.path.isdir(prev_dir):
- return True
- return False
-
-def get_hdfs_binary(distro_component_name):
- """
- Get the hdfs binary to use depending on the stack and version.
- :param distro_component_name: e.g., hadoop-hdfs-namenode, hadoop-hdfs-datanode
- :return: The hdfs binary to use
- """
- import params
- hdfs_binary = "hdfs"
- if params.stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted):
- hdfs_binary = "{0}/current/{1}/bin/hdfs".format(params.stack_root, distro_component_name)
-
- return hdfs_binary
-
-def get_dfsadmin_base_command(hdfs_binary, use_specific_namenode = False):
- """
- Get the dfsadmin base command, constructed from the hdfs_binary path and passing the namenode address as an explicit -fs argument
- :param hdfs_binary: path to hdfs binary to use
- :param use_specific_namenode: if set and NameNode HA is enabled, the dfsadmin command will use the
- current namenode's address
- :return: the constructed dfsadmin base command
- """
- import params
- dfsadmin_base_command = ""
- if params.dfs_ha_enabled and use_specific_namenode:
- dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs hdfs://{params.namenode_rpc}")
- else:
- dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs {params.namenode_address}")
- return dfsadmin_base_command
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/zkfc_slave.py
deleted file mode 100644
index f1891a5..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/zkfc_slave.py
+++ /dev/null
@@ -1,225 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-# this is needed to avoid a circular dependency since utils.py calls this class
-import utils
-from hdfs import hdfs
-
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.core.resources.system import Directory
-from resource_management.core.resources.service import Service
-from resource_management.core import shell
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.security_commons import build_expectations
-from resource_management.libraries.functions.security_commons import cached_kinit_executor
-from resource_management.libraries.functions.security_commons import get_params_from_filesystem
-from resource_management.libraries.functions.security_commons import validate_security_config_properties
-from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions.version_select_util import get_component_version
-
-class ZkfcSlave(Script):
- def get_component_name(self):
- import params
- if params.version_for_stack_feature_checks and check_stack_feature(StackFeature.ZKFC_VERSION_ADVERTISED, params.version_for_stack_feature_checks):
- # params.version is not defined when installing cluster from blueprint
- return "hadoop-hdfs-zkfc"
- pass
-
- def install(self, env):
- import params
- env.set_params(params)
- self.install_packages(env)
-
- def configure(self, env):
- ZkfcSlave.configure_static(env)
-
- @staticmethod
- def configure_static(env):
- import params
- env.set_params(params)
- hdfs("zkfc_slave")
- pass
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class ZkfcSlaveDefault(ZkfcSlave):
-
- def start(self, env, upgrade_type=None):
- ZkfcSlaveDefault.start_static(env, upgrade_type)
-
- @staticmethod
- def start_static(env, upgrade_type=None):
- import params
-
- env.set_params(params)
- ZkfcSlave.configure_static(env)
- Directory(params.hadoop_pid_dir_prefix,
- mode=0755,
- owner=params.hdfs_user,
- group=params.user_group
- )
-
- # format the znode for this HA setup
- # only run this format command if the active namenode hostname is set
- # The Ambari UI HA Wizard prompts the user to run this command
- # manually, so this guarantees it is only run in the Blueprints case
- if params.dfs_ha_enabled and \
- params.dfs_ha_namenode_active is not None:
- success = initialize_ha_zookeeper(params)
- if not success:
- raise Fail("Could not initialize HA state in zookeeper")
-
- utils.service(
- action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
- create_log_dir=True
- )
-
- def stop(self, env, upgrade_type=None):
- ZkfcSlaveDefault.stop_static(env, upgrade_type)
-
- @staticmethod
- def stop_static(env, upgrade_type=None):
- import params
-
- env.set_params(params)
- utils.service(
- action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
- create_log_dir=True
- )
-
-
- def status(self, env):
- ZkfcSlaveDefault.status_static(env)
-
- @staticmethod
- def status_static(env):
- import status_params
- env.set_params(status_params)
- check_process_status(status_params.zkfc_pid_file)
-
- def security_status(self, env):
- import status_params
- env.set_params(status_params)
- props_value_check = {"hadoop.security.authentication": "kerberos",
- "hadoop.security.authorization": "true"}
- props_empty_check = ["hadoop.security.auth_to_local"]
- props_read_check = None
- core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
- props_read_check)
- hdfs_expectations = {}
- hdfs_expectations.update(core_site_expectations)
-
- security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
- {'core-site.xml': FILE_TYPE_XML})
- result_issues = validate_security_config_properties(security_params, hdfs_expectations)
- if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
- security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
- if not result_issues: # If all validations passed successfully
- if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
- try:
- cached_kinit_executor(status_params.kinit_path_local,
- status_params.hdfs_user,
- status_params.hdfs_user_keytab,
- status_params.hdfs_user_principal,
- status_params.hostname,
- status_params.tmp_dir)
- self.put_structured_out({"securityState": "SECURED_KERBEROS"})
- except Exception as e:
- self.put_structured_out({"securityState": "ERROR"})
- self.put_structured_out({"securityStateErrorInfo": str(e)})
- else:
- self.put_structured_out(
- {"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
- self.put_structured_out({"securityState": "UNSECURED"})
- else:
- issues = []
- for cf in result_issues:
- issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
- self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
- self.put_structured_out({"securityState": "UNSECURED"})
- else:
- self.put_structured_out({"securityState": "UNSECURED"})
-
- def get_log_folder(self):
- import params
- return params.hdfs_log_dir
-
- def get_user(self):
- import params
- return params.hdfs_user
-
- def get_pid_files(self):
- import status_params
- return [status_params.zkfc_pid_file]
-
- def pre_upgrade_restart(self, env, upgrade_type=None):
- Logger.info("Executing Stack Upgrade pre-restart")
- import params
- env.set_params(params)
- if params.version and check_stack_feature(StackFeature.ZKFC_VERSION_ADVERTISED, params.version) \
- and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
- conf_select.select(params.stack_name, "hadoop", params.version)
- stack_select.select("hadoop-hdfs-zkfc", params.version)
-
-def initialize_ha_zookeeper(params):
- try:
- iterations = 10
- formatZK_cmd = "hdfs zkfc -formatZK -nonInteractive"
- Logger.info("Initialize HA state in ZooKeeper: %s" % (formatZK_cmd))
- for i in range(iterations):
- Logger.info('Try %d out of %d' % (i+1, iterations))
- code, out = shell.call(formatZK_cmd, logoutput=False, user=params.hdfs_user)
- if code == 0:
- Logger.info("HA state initialized in ZooKeeper successfully")
- return True
- elif code == 2:
- Logger.info("HA state already initialized in ZooKeeper")
- return True
- else:
- Logger.warning('HA state initialization in ZooKeeper failed with error code %d. Will retry' % (code))
- except Exception as ex:
- Logger.error('HA state initialization in ZooKeeper threw an exception. Reason %s' %(str(ex)))
- return False
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class ZkfcSlaveWindows(ZkfcSlave):
- def start(self, env):
- import params
- self.configure(env)
- Service(params.zkfc_win_service_name, action="start")
-
- def stop(self, env):
- import params
- Service(params.zkfc_win_service_name, action="stop")
-
- def status(self, env):
- import status_params
- from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
-
- env.set_params(status_params)
- check_windows_service_status(status_params.zkfc_win_service_name)
-
-if __name__ == "__main__":
- ZkfcSlave().execute()
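initialize_ha_zookeeper above encodes a simple exit-code policy for "hdfs zkfc -formatZK -nonInteractive": 0 means the znode was just initialized, 2 means it already exists (also treated as success), and anything else is retried up to 10 times. A standalone sketch of that policy using plain subprocess instead of resource_management's shell.call (the hdfs binary on PATH is an assumption):

import subprocess

def format_zkfc_znode(iterations=10):
    cmd = ["hdfs", "zkfc", "-formatZK", "-nonInteractive"]
    for i in range(iterations):
        code = subprocess.call(cmd)
        if code == 0:   # HA state freshly initialized in ZooKeeper
            return True
        if code == 2:   # znode already exists, nothing to do
            return True
        print("formatZK attempt %d/%d failed with code %d, retrying" % (i + 1, iterations, code))
    return False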
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index a92cdc1..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
\ No newline at end of file
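exclude_hosts_list.j2 simply emits one line per entry of hdfs_exclude_file; the Ambari agent renders these .j2 templates with Jinja2 (via its Template resource), typically into the file named by dfs.hosts.exclude. A minimal rendering sketch, assuming the jinja2 package is installed and using illustrative host names:

from jinja2 import Template

src = "{% for host in hdfs_exclude_file %}\n{{host}}\n{% endfor %}"
print(Template(src).render(hdfs_exclude_file=["dn1.example.com", "dn2.example.com"]))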
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/hdfs.conf.j2
deleted file mode 100644
index fad5621..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/hdfs.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{hdfs_user}} - nofile {{hdfs_user_nofile_limit}}
-{{hdfs_user}} - nproc {{hdfs_user_nproc_limit}}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/slaves.j2 b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/slaves.j2
deleted file mode 100644
index 4a9e713..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/slaves.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/quicklinks/quicklinks.json
deleted file mode 100644
index 5318ba0..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,80 +0,0 @@
-{
- "name": "default",
- "description": "default quick links configuration",
- "configuration": {
- "protocol":
- {
- "type":"https",
- "checks":[
- {
- "property":"dfs.http.policy",
- "desired":"HTTPS_ONLY",
- "site":"hdfs-site"
- }
- ]
- },
-
- "links": [
- {
- "name": "namenode_ui",
- "label": "NameNode UI",
- "component_name": "NAMENODE",
- "url":"%@://%@:%@",
- "requires_user_name": "false",
- "port":{
- "http_property": "dfs.namenode.http-address",
- "http_default_port": "50070",
- "https_property": "dfs.namenode.https-address",
- "https_default_port": "50470",
- "regex": "\\w*:(\\d+)",
- "site": "hdfs-site"
- }
- },
- {
- "name": "namenode_logs",
- "label": "NameNode Logs",
- "component_name": "NAMENODE",
- "url":"%@://%@:%@/logs",
- "requires_user_name": "false",
- "port":{
- "http_property": "dfs.namenode.http-address",
- "http_default_port": "50070",
- "https_property": "dfs.namenode.https-address",
- "https_default_port": "50470",
- "regex": "\\w*:(\\d+)",
- "site": "hdfs-site"
- }
- },
- {
- "name": "namenode_jmx",
- "label": "NameNode JMX",
- "component_name": "NAMENODE",
- "url":"%@://%@:%@/jmx",
- "requires_user_name": "false",
- "port":{
- "http_property": "dfs.namenode.http-address",
- "http_default_port": "50070",
- "https_property": "dfs.namenode.https-address",
- "https_default_port": "50470",
- "regex": "\\w*:(\\d+)",
- "site": "hdfs-site"
- }
- },
- {
- "name": "Thread Stacks",
- "label": "Thread Stacks",
- "component_name": "NAMENODE",
- "url":"%@://%@:%@/stacks",
- "requires_user_name": "false",
- "port":{
- "http_property": "dfs.namenode.http-address",
- "http_default_port": "50070",
- "https_property": "dfs.namenode.https-address",
- "https_default_port": "50470",
- "regex": "\\w*:(\\d+)",
- "site": "hdfs-site"
- }
- }
- ]
- }
-}
\ No newline at end of file
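Each quick link in the JSON above resolves its port by applying the regex \w*:(\d+) to dfs.namenode.http-address, or to the https variant (with defaults 50070/50470) when dfs.http.policy is HTTPS_ONLY. A small sketch of that resolution logic (the property values are illustrative):

import re

PORT_RE = re.compile(r"\w*:(\d+)")

def quicklink_port(hdfs_site, https_only=False):
    prop = "dfs.namenode.https-address" if https_only else "dfs.namenode.http-address"
    default = "50470" if https_only else "50070"
    m = PORT_RE.search(hdfs_site.get(prop, ""))
    return m.group(1) if m else default

print(quicklink_port({"dfs.namenode.http-address": "nn1.example.com:50070"}))         # 50070
print(quicklink_port({"dfs.namenode.https-address": "nn1.example.com:50470"}, True))  # 50470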
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/themes/theme.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/themes/theme.json
deleted file mode 100644
index 6f2b797..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/themes/theme.json
+++ /dev/null
@@ -1,179 +0,0 @@
-{
- "name": "default",
- "description": "Default theme for HDFS service",
- "configuration": {
- "layouts": [
- {
- "name": "default",
- "tabs": [
- {
- "name": "settings",
- "display-name": "Settings",
- "layout": {
- "tab-columns": "2",
- "tab-rows": "1",
- "sections": [
- {
- "name": "section-namenode",
- "display-name": "NameNode",
- "row-index": "0",
- "column-index": "0",
- "row-span": "1",
- "column-span": "1",
- "section-columns": "1",
- "section-rows": "1",
- "subsections": [
- {
- "name": "subsection-namenode-col1",
- "row-index": "0",
- "column-index": "0",
- "row-span": "1",
- "column-span": "1"
- }
- ]
- },
- {
- "name": "section-datanode",
- "display-name": "DataNode",
- "row-index": "0",
- "column-index": "1",
- "row-span": "1",
- "column-span": "1",
- "section-columns": "1",
- "section-rows": "1",
- "subsections": [
- {
- "name": "subsection-datanode-col1",
- "row-index": "0",
- "column-index": "0",
- "row-span": "1",
- "column-span": "1"
- }
- ]
- }
- ]
- }
- }
- ]
- }
- ],
- "placement": {
- "configuration-layout": "default",
- "configs": [
- {
- "config": "hdfs-site/dfs.namenode.name.dir",
- "subsection-name": "subsection-namenode-col1"
- },
- {
- "config": "hadoop-env/namenode_heapsize",
- "subsection-name": "subsection-namenode-col1"
- },
- {
- "config": "hdfs-site/dfs.namenode.handler.count",
- "subsection-name": "subsection-namenode-col1"
- },
- {
- "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
- "subsection-name": "subsection-namenode-col1"
- },
- {
- "config": "hdfs-site/dfs.datanode.data.dir",
- "subsection-name": "subsection-datanode-col1"
- },
- {
- "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
- "subsection-name": "subsection-datanode-col1"
- },
- {
- "config": "hadoop-env/dtnode_heapsize",
- "subsection-name": "subsection-datanode-col1"
- },
- {
- "config": "hdfs-site/dfs.datanode.max.transfer.threads",
- "subsection-name": "subsection-datanode-col1"
- }
- ]
- },
- "widgets": [
- {
- "config": "hdfs-site/dfs.namenode.name.dir",
- "widget": {
- "type": "directories"
- }
- },
- {
- "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
- "widget": {
- "type": "slider",
- "units": [
- {
- "unit-name": "percent"
- }
- ]
- }
- },
- {
- "config": "hdfs-site/dfs.namenode.handler.count",
- "widget": {
- "type": "slider",
- "units": [
- {
- "unit-name": "int"
- }
- ]
- }
- },
- {
- "config": "hadoop-env/namenode_heapsize",
- "widget": {
- "type": "slider",
- "units": [
- {
- "unit-name": "GB"
- }
- ]
- }
- },
- {
- "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
- "widget": {
- "type": "slider",
- "units": [
- {
- "unit-name": "int"
- }
- ]
- }
- },
- {
- "config": "hdfs-site/dfs.datanode.data.dir",
- "widget": {
- "type": "directories"
- }
- },
- {
- "config": "hadoop-env/dtnode_heapsize",
- "widget": {
- "type": "slider",
- "units": [
- {
- "unit-name": "GB"
- }
- ]
- }
- },
- {
- "config": "hdfs-site/dfs.datanode.max.transfer.threads",
- "widget": {
- "type": "slider",
- "units": [
- {
- "unit-name": "int"
- }
- ]
- }
- }
- ]
- }
-}
-