Posted to commits@ambari.apache.org by sw...@apache.org on 2016/12/08 23:29:57 UTC
[18/25] ambari git commit: Merge from branch-2.5
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
new file mode 100644
index 0000000..7e12962
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
@@ -0,0 +1,134 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender
+#
+# Use the following logger to send the job summary to a separate file,
+# defined by hadoop.mapreduce.jobsummary.log.file and rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
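
For context on the ${...} references above: log4j resolves them against JVM system
properties first, so a default such as hadoop.root.logger=INFO,console can be
overridden at launch with -Dhadoop.root.logger=DEBUG,console. A minimal Python
sketch of that resolution order (illustrative only; the property names are the
ones defined in the file above):

import re

def resolve(props, overrides, key):
  # system-property overrides win over the file defaults, as in log4j
  value = overrides.get(key, props[key])
  # expand ${name} references recursively
  return re.sub(r"\$\{([^}]+)\}",
                lambda m: resolve(props, overrides, m.group(1)), value)

props = {"hadoop.root.logger": "INFO,console",
         "hadoop.mapreduce.jobsummary.logger": "${hadoop.root.logger}"}
print resolve(props, {"hadoop.root.logger": "DEBUG,console"},
              "hadoop.mapreduce.jobsummary.logger")   # -> DEBUG,console
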
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
new file mode 100644
index 0000000..0f7a55c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import sys, os
+from string import join
+import ConfigParser
+
+
+DEFAULT_RACK = "/default-rack"
+DATA_FILE_NAME = os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
+SECTION_NAME = "network_topology"
+
+class TopologyScript():
+
+  def load_rack_map(self):
+    try:
+      #the rack map contains both hostname-to-rack and IP-to-rack mappings
+      mappings = ConfigParser.ConfigParser()
+      mappings.read(DATA_FILE_NAME)
+      return dict(mappings.items(SECTION_NAME))
+    except ConfigParser.NoSectionError:
+      return {}
+
+  def get_racks(self, rack_map, args):
+    if len(args) == 1:
+      return DEFAULT_RACK
+    else:
+      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]])
+
+  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
+    #try looking up by hostname
+    rack = rack_map.get(hostname_or_ip)
+    if rack is not None:
+      return rack
+    #try looking up by ip
+    rack = rack_map.get(self.extract_ip(hostname_or_ip))
+    #fall back to localhost since hadoop may pass in 127.0.0.1, which might not be mapped
+    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
+
+  #strips out the port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
+  def extract_ip(self, container_string):
+    return container_string.split("/")[0].split(":")[0]
+
+  def execute(self, args):
+    rack_map = self.load_rack_map()
+    rack = self.get_racks(rack_map, args)
+    print rack
+
+if __name__ == "__main__":
+  TopologyScript().execute(sys.argv)
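
A usage sketch for the script above (hypothetical hostnames and racks; note that
topology_mappings.data must sit next to the script, in the INI format that
ConfigParser reads):

with open("topology_mappings.data", "w") as f:
  f.write("[network_topology]\n")
  f.write("host1.example.com=/rack-01\n")
  f.write("10.0.0.11=/rack-01\n")

# $ python topology_script.py host1.example.com 10.0.0.11 unknown-host
# /rack-01 /rack-01 /default-rack

Any argument not found by hostname or IP falls back to DEFAULT_RACK.
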
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
new file mode 100644
index 0000000..f21e4b1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
@@ -0,0 +1,39 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from rack_awareness import create_topology_script_and_mapping
+from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
+
+class BeforeStartHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+
+    setup_hadoop()
+    setup_configs()
+    create_javahome_symlink()
+    create_topology_script_and_mapping()
+
+if __name__ == "__main__":
+  BeforeStartHook().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
new file mode 100644
index 0000000..d838211
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
@@ -0,0 +1,326 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+
+config = Script.get_config()
+
+# Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/.
+# The jar is required when tarballs are to be copied to HDFS, so leave this False in that case.
+sysprep_skip_copy_fast_jar_hdfs = default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+dfs_type = default("/commandParams/dfs_type", "")
+stack_root = Script.get_stack_root()
+hadoop_conf_dir = "/etc/hadoop/conf"
+component_list = default("/localComponents", [])
+
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+
+hadoop_metrics2_properties_content = config['configurations']['hadoop-metrics2.properties']['content']
+
+# hadoop default params
+mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
+
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_home = stack_select.get_hadoop_dir("home")
+create_lib_snappy_symlinks = False
+
+
+current_service = config['serviceName']
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#users and groups
+has_hadoop_env = 'hadoop-env' in config['configurations']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host = default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
+has_namenode = not len(namenode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+has_hcat_server_host = not len(hcat_server_hosts) == 0
+has_hive_server_host = not len(hive_server_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_metric_collector = not len(ams_collector_hosts) == 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+metric_collector_port = None
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path = default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type = default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password = default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+  pass
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+
+# Cluster Zookeeper quorum
+zookeeper_quorum = None
+if has_zk_host:
+  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
+    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
+  else:
+    zookeeper_clientPort = '2181'
+  zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_hosts'])
+  # last port config
+  zookeeper_quorum += ':' + zookeeper_clientPort
+
+#hadoop params
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hbase_tmp_dir = "/tmp/hbase-hbase"
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+ambari_server_resources = config['hostLevelParams']['jdk_location']
+oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
+mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
+
+if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
+  rca_enabled = config['configurations']['hadoop-env']['rca_enabled']
+else:
+  rca_enabled = False
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize = "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#log4j.properties
+
+yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
+
+#log4j.properties
+if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
+  log4j_props = config['configurations']['hdfs-log4j']['content']
+  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
+    log4j_props += config['configurations']['yarn-log4j']['content']
+else:
+  log4j_props = None
+
+refresh_topology = False
+command_params = config["commandParams"] if "commandParams" in config else None
+if command_params is not None:
+  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+
+ambari_libs_dir = "/var/lib/ambari-agent/lib"
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+#host info
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+
+#topology files
+net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
+net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
+net_topology_mapping_data_file_name = 'topology_mappings.data'
+net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
+
+#Added logic to create /tmp and /user directory for HCFS stack.
+has_core_site = 'core-site' in config['configurations']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path()
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+smoke_user = config['configurations']['cluster-env']['smokeuser']
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 0770
+
+
+##### Namenode RPC ports - metrics config section start #####
+
+# Figure out the rpc ports for current namenode
+nn_rpc_client_port = None
+nn_rpc_dn_port = None
+nn_rpc_healthcheck_port = None
+
+namenode_id = None
+namenode_rpc = None
+
+dfs_ha_enabled = False
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
+dfs_ha_namemodes_ids_list = []
+other_namenode_id = None
+
+if dfs_ha_namenode_ids:
+  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namemodes_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+      namenode_rpc = nn_host
+    pass
+  pass
+else:
+  namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', None)
+
+if namenode_rpc:
+  nn_rpc_client_port = namenode_rpc.split(':')[1].strip()
+
+if dfs_ha_enabled:
+  dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+  dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+else:
+  dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
+  dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None)
+
+if dfs_service_rpc_address:
+  nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip()
+
+if dfs_lifeline_rpc_address:
+  nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip()
+
+is_nn_client_port_configured = False if nn_rpc_client_port is None else True
+is_nn_dn_port_configured = False if nn_rpc_dn_port is None else True
+is_nn_healthcheck_port_configured = False if nn_rpc_healthcheck_port is None else True
+
+##### end #####
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled=security_enabled,
+  keytab=hdfs_user_keytab,
+  kinit_path_local=kinit_path_local,
+  hadoop_bin_dir=hadoop_bin_dir,
+  hadoop_conf_dir=hadoop_conf_dir,
+  principal_name=hdfs_principal_name,
+  hdfs_site=hdfs_site,
+  default_fs=default_fs,
+  immutable_paths=get_not_managed_resources(),
+  dfs_type=dfs_type
+)
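
The functools.partial at the end of params.py pre-binds the cluster-wide
arguments, so call sites (see create_dirs in shared_initialization.py below)
only pass per-resource options. A standalone sketch of the same pattern, with a
toy stand-in for the real HdfsResource:

import functools

def make_resource(path, user=None, security_enabled=False, mode=None):
  # toy stand-in for HdfsResource: just echoes its arguments
  print path, user, security_enabled, mode

Resource = functools.partial(make_resource, user="hdfs", security_enabled=True)
Resource("/tmp", mode=0777)   # -> /tmp hdfs True 0777
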
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
new file mode 100644
index 0000000..548f051
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.core.resources import File
+from resource_management.core.source import StaticFile, Template
+from resource_management.libraries.functions import format
+
+
+def create_topology_mapping():
+  import params
+
+  File(params.net_topology_mapping_data_file_path,
+       content=Template("topology_mappings.data.j2"),
+       owner=params.hdfs_user,
+       group=params.user_group,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script():
+  import params
+
+  File(params.net_topology_script_file_path,
+       content=StaticFile('topology_script.py'),
+       mode=0755,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script_and_mapping():
+  import params
+  if params.has_hadoop_env:
+    create_topology_mapping()
+    create_topology_script()
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
new file mode 100644
index 0000000..5dce8e0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,191 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+
+from resource_management import *
+
+def setup_hadoop():
+  """
+  Setup hadoop files and directories
+  """
+  import params
+
+  Execute(("setenforce","0"),
+          only_if="test -f /selinux/enforce",
+          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
+          sudo=True,
+  )
+
+  #directories
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    Directory(params.hdfs_log_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group=params.user_group,
+              mode=0775,
+              cd_access='a',
+    )
+    if params.has_namenode:
+      Directory(params.hadoop_pid_dir_prefix,
+                create_parents = True,
+                owner='root',
+                group='root',
+                cd_access='a',
+      )
+      Directory(params.hadoop_tmp_dir,
+                create_parents = True,
+                owner=params.hdfs_user,
+                cd_access='a',
+      )
+    #files
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+
+    # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS.
+    if params.sysprep_skip_copy_fast_jar_hdfs:
+      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
+    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+      # for the source code of the jar, see contrib/fast-hdfs-resource
+      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
+           mode=0644,
+           content=StaticFile("fast-hdfs-resource.jar")
+      )
+
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
+           owner=tc_owner,
+           content=Template('commons-logging.properties.j2')
+      )
+
+      health_check_template_name = "health_check"
+      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
+           owner=tc_owner,
+           content=Template(health_check_template_name + ".j2")
+      )
+
+      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+      if (params.log4j_props != None):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+             content=params.log4j_props
+        )
+      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+        )
+
+      File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+           owner=params.hdfs_user,
+           group=params.user_group,
+           content=InlineTemplate(params.hadoop_metrics2_properties_content)
+      )
+
+    if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
+      create_dirs()
+
+  create_microsoft_r_dir()
+
+
+def setup_configs():
+  """
+  Creates configs for services HDFS mapred
+  """
+  import params
+
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    if os.path.exists(params.hadoop_conf_dir):
+      File(params.task_log4j_properties_location,
+           content=StaticFile("task-log4j.properties"),
+           mode=0755
+      )
+
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+           owner=params.hdfs_user,
+           group=params.user_group
+      )
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+      File(os.path.join(params.hadoop_conf_dir, 'masters'),
+           owner=params.hdfs_user,
+           group=params.user_group
+      )
+
+  generate_include_file()
+
+
+def generate_include_file():
+  import params
+
+  if params.has_namenode and params.dfs_hosts and params.has_slaves:
+    include_hosts_list = params.slave_hosts
+    File(params.dfs_hosts,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+def create_javahome_symlink():
+  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
+    Directory("/usr/jdk64/",
+              create_parents = True,
+    )
+    Link("/usr/jdk/jdk1.6.0_31",
+         to="/usr/jdk64/jdk1.6.0_31",
+    )
+
+def create_dirs():
+  import params
+  params.HdfsResource(params.hdfs_tmp_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.hdfs_user,
+                      mode=0777
+  )
+  params.HdfsResource(params.smoke_hdfs_user_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.smoke_user,
+                      mode=params.smoke_hdfs_user_mode
+  )
+  params.HdfsResource(None,
+                      action="execute"
+  )
+
+def create_microsoft_r_dir():
+  import params
+  if 'MICROSOFT_R_NODE_CLIENT' in params.component_list and params.default_fs:
+    directory = '/user/RevoShare'
+    try:
+      params.HdfsResource(directory,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.hdfs_user,
+                          mode=0777)
+      params.HdfsResource(None, action="execute")
+    except Exception as exception:
+      Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
+
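The Execute/File/Directory resources above take only_if/not_if shell predicates
as guards. A rough standalone analog of those guard semantics using subprocess
(illustrative only, not the resource_management API):

import subprocess

def guarded_execute(cmd, only_if=None, not_if=None):
  # skip unless the only_if predicate succeeds; skip if the not_if predicate succeeds
  if only_if is not None and subprocess.call(only_if, shell=True) != 0:
    return
  if not_if is not None and subprocess.call(not_if, shell=True) == 0:
    return
  subprocess.call(cmd, shell=True)

guarded_execute("setenforce 0",
                only_if="test -f /selinux/enforce",
                not_if="! which getenforce || getenforce | grep -q Disabled")
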
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
new file mode 100644
index 0000000..2197ba5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,43 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..1adba80
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100644
index 0000000..2f3aab6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,105 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
+supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}
+
+{% if has_metric_collector %}
+
+*.period={{metrics_collection_period}}
+*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+*.sink.timeline.period={{metrics_collection_period}}
+*.sink.timeline.sendInterval={{metrics_report_interval}}000
+*.sink.timeline.slave.host.name={{hostname}}
+*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
+*.sink.timeline.protocol={{metric_collector_protocol}}
+*.sink.timeline.port={{metric_collector_port}}
+
+# HTTPS properties
+*.sink.timeline.truststore.path = {{metric_truststore_path}}
+*.sink.timeline.truststore.type = {{metric_truststore_type}}
+*.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
+journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
+
+resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
+
+{% if is_nn_client_port_configured %}
+# Namenode rpc ports customization
+namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
+{% endif %}
+{% if is_nn_dn_port_configured %}
+namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
+{% endif %}
+{% if is_nn_healthcheck_port_configured %}
+namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
+{% endif %}
+
+{% endif %}
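
The template above is rendered with the variables computed in params.py. A
minimal sketch of the substitution using the jinja2 library directly (Ambari
wraps this in its own Template resource; the values here are hypothetical):

from jinja2 import Template

src = "{% if has_metric_collector %}*.sink.timeline.port={{metric_collector_port}}{% endif %}"
print Template(src).render(has_metric_collector=True, metric_collector_port="6188")
# -> *.sink.timeline.port=6188
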
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
new file mode 100644
index 0000000..0a03d17
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
@@ -0,0 +1,81 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+# Run all checks
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0
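
The check_disks function above flags ext3 mounts declared in /etc/fstab that
are missing or read-only in /proc/mounts. A rough Python analog of the same
logic (illustrative, not part of the commit):

def check_disks(fstab="/etc/fstab", mounts="/proc/mounts"):
  # ext3 mount points declared in fstab ($3 ~ /ext3/ in the awk above)
  wanted = [f[1] for f in (l.split() for l in open(fstab))
            if len(f) > 2 and "ext3" in f[2]]
  # mount point -> mount options, from /proc/mounts
  mounted = dict((f[1], f[3]) for f in (l.split() for l in open(mounts))
                 if len(f) > 3)
  problems = []
  for m in wanted:
    if m not in mounted and m != "/mnt":
      problems.append(m + "(u)")              # unmounted
    elif m in mounted and mounted[m].startswith("ro,"):
      problems.append(m + "(ro)")             # mounted read-only
  return "disks ok" if not problems else " ".join(problems)
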
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
new file mode 100644
index 0000000..4a9e713
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
new file mode 100644
index 0000000..15034d6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
@@ -0,0 +1,24 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+ #
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+[network_topology]
+{% for host in all_hosts %}
+{% if host in slave_hosts %}
+{{host}}={{all_racks[loop.index-1]}}
+{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
+{% endif %}
+{% endfor %}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
new file mode 100644
index 0000000..9579d0f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
@@ -0,0 +1,78 @@
+{
+ "properties": {
+ "realm": "${kerberos-env/realm}",
+ "keytab_dir": "/etc/security/keytabs",
+ "additional_realms": ""
+ },
+ "identities": [
+ {
+ "name": "spnego",
+ "principal": {
+ "value": "HTTP/_HOST@${realm}",
+ "type": "service"
+ },
+ "keytab": {
+ "file": "${keytab_dir}/spnego.service.keytab",
+ "owner": {
+ "name": "root",
+ "access": "r"
+ },
+ "group": {
+ "name": "${cluster-env/user_group}",
+ "access": "r"
+ }
+ }
+ },
+ {
+ "name": "smokeuser",
+ "principal": {
+ "value": "${cluster-env/smokeuser}-${cluster_name|toLower()}@${realm}",
+ "type": "user",
+ "configuration": "cluster-env/smokeuser_principal_name",
+ "local_username": "${cluster-env/smokeuser}"
+ },
+ "keytab": {
+ "file": "${keytab_dir}/smokeuser.headless.keytab",
+ "owner": {
+ "name": "${cluster-env/smokeuser}",
+ "access": "r"
+ },
+ "group": {
+ "name": "${cluster-env/user_group}",
+ "access": "r"
+ },
+ "configuration": "cluster-env/smokeuser_keytab"
+ }
+ }
+ ],
+ "services": [
+ {
+ "name": "AMBARI",
+ "components": [
+ {
+ "name": "AMBARI_SERVER",
+ "identities": [
+ {
+ "name": "ambari-server",
+ "principal": {
+ "value": "ambari-server-${cluster_name|toLower()}@${realm}",
+ "type": "user",
+ "configuration": "cluster-env/ambari_principal_name"
+ },
+ "keytab": {
+ "file": "${keytab_dir}/ambari.server.keytab",
+ "owner": {
+ "access": "r"
+ }
+ }
+ },
+ {
+ "name" : "ambari-server_spnego",
+ "reference" : "/spnego"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
new file mode 100644
index 0000000..0364d41
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <versions>
+ <active>true</active>
+ </versions>
+ <minJdk>1.7</minJdk>
+ <maxJdk>1.8</maxJdk>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
new file mode 100644
index 0000000..dd87b72
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
@@ -0,0 +1,323 @@
+{
+ "stack_features": [
+ {
+ "name": "snappy",
+ "description": "Snappy compressor/decompressor support",
+ "min_version": "2.0.0.0",
+ "max_version": "2.2.0.0"
+ },
+ {
+ "name": "lzo",
+ "description": "LZO libraries support",
+ "min_version": "2.2.1.0"
+ },
+ {
+ "name": "express_upgrade",
+ "description": "Express upgrade support",
+ "min_version": "2.1.0.0"
+ },
+ {
+ "name": "rolling_upgrade",
+ "description": "Rolling upgrade support",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "config_versioning",
+ "description": "Configurable versions support",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "datanode_non_root",
+ "description": "DataNode running as non-root support (AMBARI-7615)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "remove_ranger_hdfs_plugin_env",
+ "description": "HDFS removes Ranger env files (AMBARI-14299)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "ranger",
+ "description": "Ranger Service support",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "ranger_tagsync_component",
+ "description": "Ranger Tagsync component support (AMBARI-14383)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "phoenix",
+ "description": "Phoenix Service support",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "nfs",
+ "description": "NFS support",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "tez_for_spark",
+ "description": "Tez dependency for Spark",
+ "min_version": "2.2.0.0",
+ "max_version": "2.3.0.0"
+ },
+ {
+ "name": "timeline_state_store",
+ "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "copy_tarball_to_hdfs",
+ "description": "Copy tarball to HDFS support (AMBARI-12113)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "spark_16plus",
+ "description": "Spark 1.6+",
+ "min_version": "2.4.0.0"
+ },
+ {
+ "name": "spark_thriftserver",
+ "description": "Spark Thrift Server",
+ "min_version": "2.3.2.0"
+ },
+ {
+ "name": "storm_kerberos",
+ "description": "Storm Kerberos support (AMBARI-7570)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "storm_ams",
+ "description": "Storm AMS integration (AMBARI-10710)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "create_kafka_broker_id",
+ "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+ "min_version": "2.2.0.0",
+ "max_version": "2.3.0.0"
+ },
+ {
+ "name": "kafka_listeners",
+ "description": "Kafka listeners (AMBARI-10984)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "kafka_kerberos",
+ "description": "Kafka Kerberos support (AMBARI-10984)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "pig_on_tez",
+ "description": "Pig on Tez support (AMBARI-7863)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "ranger_usersync_non_root",
+ "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "ranger_audit_db_support",
+ "description": "Ranger Audit to DB support",
+ "min_version": "2.2.0.0",
+ "max_version": "2.5.0.0"
+ },
+ {
+ "name": "accumulo_kerberos_user_auth",
+ "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "knox_versioned_data_dir",
+ "description": "Use versioned data dir for Knox (AMBARI-13164)",
+ "min_version": "2.3.2.0"
+ },
+ {
+ "name": "knox_sso_topology",
+ "description": "Knox SSO Topology support (AMBARI-13975)",
+ "min_version": "2.3.8.0"
+ },
+ {
+ "name": "atlas_rolling_upgrade",
+ "description": "Rolling upgrade support for Atlas",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "oozie_admin_user",
+ "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "oozie_create_hive_tez_configs",
+ "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "oozie_setup_shared_lib",
+ "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "oozie_host_kerberos",
+ "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+ "min_version": "2.0.0.0",
+ "max_version": "2.2.0.0"
+ },
+ {
+ "name": "falcon_extensions",
+ "description": "Falcon Extension",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hive_metastore_upgrade_schema",
+ "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "hive_server_interactive",
+ "description": "Hive server interactive support (AMBARI-15573)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hive_webhcat_specific_configs",
+ "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "hive_purge_table",
+ "description": "Hive purge table support (AMBARI-12260)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "hive_server2_kerberized_env",
+ "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+ "min_version": "2.2.3.0",
+ "max_version": "2.2.5.0"
+ },
+ {
+ "name": "hive_env_heapsize",
+ "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "ranger_kms_hsm_support",
+ "description": "Ranger KMS HSM support (AMBARI-15752)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_log4j_support",
+ "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_kerberos_support",
+ "description": "Ranger Kerberos support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hive_metastore_site_support",
+ "description": "Hive Metastore site support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_usersync_password_jceks",
+ "description": "Saving Ranger Usersync credentials in jceks",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_install_infra_client",
+ "description": "Ambari Infra Service support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "falcon_atlas_support_2_3",
+ "description": "Falcon Atlas integration support for 2.3 stack",
+ "min_version": "2.3.99.0",
+ "max_version": "2.4.0.0"
+ },
+ {
+ "name": "falcon_atlas_support",
+ "description": "Falcon Atlas integration",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hbase_home_directory",
+ "description": "Hbase home directory in HDFS needed for HBASE backup",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "spark_livy",
+ "description": "Livy as slave component of spark",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "atlas_ranger_plugin_support",
+ "description": "Atlas Ranger plugin support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "atlas_conf_dir_in_path",
+ "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
+ "min_version": "2.3.0.0",
+ "max_version": "2.4.99.99"
+ },
+ {
+ "name": "atlas_upgrade_support",
+ "description": "Atlas supports express and rolling upgrades",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "atlas_hook_support",
+ "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_pid_support",
+ "description": "Ranger Service support pid generation AMBARI-16756",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_kms_pid_support",
+ "description": "Ranger KMS Service support pid generation",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_admin_password_change",
+ "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "storm_metrics_apache_classes",
+ "description": "Metrics sink for Storm that uses Apache class names",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "spark_java_opts_support",
+ "description": "Allow Spark to generate java-opts file",
+ "min_version": "2.2.0.0",
+ "max_version": "2.4.0.0"
+ },
+ {
+ "name": "atlas_hbase_setup",
+ "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_hive_plugin_jdbc_url",
+ "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "zkfc_version_advertised",
+ "description": "ZKFC advertise version",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "phoenix_core_hdfs_site_required",
+ "description": "HDFS and CORE site required for Phoenix",
+ "max_version": "2.5.9.9"
+ }
+ ]
+}
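
Each feature in the registry above is gated on the stack version by min_version and an optional max_version. A minimal Python sketch of evaluating such a registry follows; it is illustrative only. The function name check_feature, the top-level "stack_features" key, the tuple-based version comparison, and the treatment of max_version as exclusive are all assumptions, not Ambari's actual stack_features helper (the file itself does not state the boundary semantics).

import json

def version_tuple(version):
    # "2.3.0.0" -> (2, 3, 0, 0) so versions compare numerically, not lexically
    return tuple(int(part) for part in version.split("."))

def check_feature(features, name, stack_version):
    """Return True if feature 'name' applies at 'stack_version'.

    Assumption: min_version is inclusive, max_version is exclusive.
    """
    current = version_tuple(stack_version)
    for feature in features:
        if feature["name"] == name:
            if "min_version" in feature and current < version_tuple(feature["min_version"]):
                return False
            if "max_version" in feature and current >= version_tuple(feature["max_version"]):
                return False
            return True
    return False

# Assumes the list above sits under a "stack_features" key.
with open("stack_features.json") as f:
    features = json.load(f)["stack_features"]
print(check_feature(features, "rolling_upgrade", "2.4.0.0"))  # True  (min 2.2.0.0)
print(check_feature(features, "tez_for_spark", "2.4.0.0"))    # False (max 2.3.0.0)
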
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
new file mode 100644
index 0000000..d1aab4b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
@@ -0,0 +1,4 @@
+{
+ "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
+ "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
+}
\ No newline at end of file
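
The two triples in stack_tools.json read as (tool name, executable path, package name); that reading is inferred from the values and is not stated in the file. Resolving the selector binary from it is then a one-liner, sketched here:

import json

# Illustrative sketch: pick the distro-selector binary out of stack_tools.json.
with open("stack_tools.json") as f:
    tools = json.load(f)

selector_name, selector_path, selector_package = tools["stack_selector"]
print(selector_path)  # /usr/bin/hdp-select
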
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml
new file mode 100644
index 0000000..5145064
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<reposinfo>
+ <latest>http://s3.amazonaws.com/dev.hortonworks.com/HDP/hdp_urlinfo.json</latest>
+ <os family="redhat6">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/3.x/updates/3.0.0.0</baseurl>
+ <repoid>HDP-3.0</repoid>
+ <reponame>HDP</reponame>
+ <unique>true</unique>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6</baseurl>
+ <repoid>HDP-UTILS-1.1.0.21</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <unique>false</unique>
+ </repo>
+ </os>
+ <os family="redhat7">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos7/3.x/updates/3.0.0.0</baseurl>
+ <repoid>HDP-3.0</repoid>
+ <reponame>HDP</reponame>
+ <unique>true</unique>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos7</baseurl>
+ <repoid>HDP-UTILS-1.1.0.21</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <unique>false</unique>
+ </repo>
+ </os>
+ <os family="suse11">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11sp3/3.x/updates/3.0.0.0</baseurl>
+ <repoid>HDP-3.0</repoid>
+ <reponame>HDP</reponame>
+ <unique>true</unique>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/suse11sp3</baseurl>
+ <repoid>HDP-UTILS-1.1.0.21</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <unique>false</unique>
+ </repo>
+ </os>
+ <os family="suse12">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/sles12/3.x/updates/3.0.0.0</baseurl>
+ <repoid>HDP-3.0</repoid>
+ <reponame>HDP</reponame>
+ <unique>true</unique>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/suse11sp3</baseurl>
+ <repoid>HDP-UTILS-1.1.0.21</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <unique>false</unique>
+ </repo>
+ </os>
+ <os family="ubuntu12">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu12/3.x/updates/2.3.0.0</baseurl>
+ <repoid>HDP-3.0</repoid>
+ <reponame>HDP</reponame>
+ <unique>true</unique>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
+ <repoid>HDP-UTILS-1.1.0.21</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <unique>false</unique>
+ </repo>
+ </os>
+ <os family="debian7">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/debian7/3.x/updates/2.3.0.0</baseurl>
+ <repoid>HDP-3.0</repoid>
+ <reponame>HDP</reponame>
+ <unique>true</unique>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/debian6</baseurl>
+ <repoid>HDP-UTILS-1.1.0.21</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <unique>false</unique>
+ </repo>
+ </os>
+ <os family="ubuntu14">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu14/3.x/updates/2.3.0.0</baseurl>
+ <repoid>HDP-3.0</repoid>
+ <reponame>HDP</reponame>
+ <unique>true</unique>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
+ <repoid>HDP-UTILS-1.1.0.21</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <unique>false</unique>
+ </repo>
+ </os>
+ <os family="ubuntu16">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu14/3.x/updates/3.0.0.0</baseurl>
+ <repoid>HDP-3.0</repoid>
+ <reponame>HDP</reponame>
+ <unique>true</unique>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
+ <repoid>HDP-UTILS-1.1.0.21</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <unique>false</unique>
+ </repo>
+ </os>
+</reposinfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml
new file mode 100644
index 0000000..9dcf561
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,56 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+ <!-- These configs were inherited from HDP 2.2 -->
+ <property>
+ <name>hadoop.http.authentication.simple.anonymous.allowed</name>
+ <value>true</value>
+ <description>
+ Indicates if anonymous requests are allowed when using 'simple' authentication.
+ </description>
+ <on-ambari-upgrade add="true"/>
+ </property>
+ <property>
+ <name>hadoop.security.key.provider.path</name>
+ <value/>
+ <value-attributes>
+ <empty-value-valid>true</empty-value-valid>
+ </value-attributes>
+ <depends-on>
+ <property>
+ <type>hadoop-env</type>
+ <name>keyserver_host</name>
+ </property>
+ <property>
+ <type>hadoop-env</type>
+ <name>keyserver_port</name>
+ </property>
+ <property>
+ <type>kms-env</type>
+ <name>kms_port</name>
+ </property>
+ <property>
+ <type>ranger-kms-site</type>
+ <name>ranger.service.https.attrib.ssl.enabled</name>
+ </property>
+ </depends-on>
+ <on-ambari-upgrade add="false"/>
+ </property>
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..8697740
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,200 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+ <!-- These configs were inherited from HDP 2.2 -->
+ <property>
+ <name>keyserver_host</name>
+ <value> </value>
+ <display-name>Key Server Host</display-name>
+ <description>Hostnames where Key Management Server is installed</description>
+ <value-attributes>
+ <type>string</type>
+ </value-attributes>
+ <on-ambari-upgrade add="true"/>
+ </property>
+ <property>
+ <name>keyserver_port</name>
+ <value/>
+ <display-name>Key Server Port</display-name>
+ <description>Port number where Key Management Server is available</description>
+ <value-attributes>
+ <type>int</type>
+ <empty-value-valid>true</empty-value-valid>
+ </value-attributes>
+ <on-ambari-upgrade add="true"/>
+ </property>
+
+ <!-- These configs were inherited from HDP 2.3 -->
+ <!-- hadoop-env.sh -->
+ <property>
+ <name>content</name>
+ <display-name>hadoop-env template</display-name>
+ <description>This is the jinja template for hadoop-env.sh file</description>
+ <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME. All others are
+# optional. When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use. Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hadoop Configuration Directory
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options. Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+
+{% if java_version < 8 %}
+SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+
+{% else %}
+SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+{% endif %}
+
+HADOOP_NFS3_OPTS="-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options. Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored. $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from. Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Add database libraries
+JAVA_JDBC_LIBS=""
+if [ -d "/usr/share/java" ]; then
+ for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2>/dev/null`
+ do
+ JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:/usr/share/java/$jarFile
+ done
+fi
+
+# Add libraries to the hadoop classpath - some may not need a colon as they already include it
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+# Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}
+
+export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
+
+
+# Fix a temporary bug where the ulimit from conf files is not picked up without a full relogin.
+# Makes sense to fix only when running the DN as root
+if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ {% if is_datanode_max_locked_memory_set %}
+ ulimit -l {{datanode_max_locked_memory}}
+ {% endif %}
+ ulimit -n {{hdfs_user_nofile_limit}}
+fi
+ </value>
+ <value-attributes>
+ <type>content</type>
+ </value-attributes>
+ <on-ambari-upgrade add="true"/>
+ </property>
+ <property>
+ <name>nfsgateway_heapsize</name>
+ <display-name>NFSGateway maximum Java heap size</display-name>
+ <value>1024</value>
+ <description>Maximum Java heap size for NFSGateway (Java option -Xmx)</description>
+ <value-attributes>
+ <type>int</type>
+ <unit>MB</unit>
+ </value-attributes>
+ <on-ambari-upgrade add="true"/>
+ </property>
+</configuration>