Posted to commits@ambari.apache.org by ao...@apache.org on 2014/01/31 20:51:15 UTC

[50/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
new file mode 100644
index 0000000..211c2bb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
@@ -0,0 +1,198 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+import os
+
+config = Script.get_config()
+
+#java params
+artifact_dir = "/tmp/HDP-artifacts/"
+jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_location = config['hostLevelParams']['jdk_location']
+#security params
+security_enabled = config['configurations']['global']['security_enabled']
+dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
+dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
+dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['fs.secondary.namenode.keytab.file']
+dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+
+dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
+dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
+dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
+dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
+dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
+
+#users and groups
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = config['configurations']['global']['hdfs_user']
+yarn_user = config['configurations']['global']['yarn_user']
+
+user_group = config['configurations']['global']['user_group']
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#snmp
+snmp_conf_dir = "/etc/snmp/"
+snmp_source = "0.0.0.0/0"
+snmp_community = "hadoop"
+
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+
+has_resourcemanager = len(rm_host) > 0
+has_slaves = len(slave_hosts) > 0
+has_nagios = len(nagios_server_hosts) > 0
+has_oozie_server = len(oozie_servers) > 0
+has_hcat_server_host = len(hcat_server_hosts) > 0
+has_hive_server_host = len(hive_server_host) > 0
+has_hbase_masters = len(hbase_master_hosts) > 0
+has_zk_host = len(zk_hosts) > 0
+has_ganglia_server = len(ganglia_server_hosts) > 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+#hadoop params
+hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+hadoop_lib_home = "/usr/lib/hadoop/lib"
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hadoop_home = "/usr"
+hadoop_bin = "/usr/lib/hadoop/bin"
+
+task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+limits_conf_dir = "/etc/security/limits.d"
+
+hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url']
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver']
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username']
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password']
+
+rca_enabled = config['configurations']['global']['rca_enabled']
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+if System.get_instance().os_family == "suse":
+  jsvc_path = "/usr/lib/bigtop-utils"
+else:
+  jsvc_path = "/usr/libexec/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['global']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
+
+jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
+jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
+jtnode_heapsize =  default("jtnode_heapsize","1024m")
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#taskcontroller.cfg
+
+mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
+
+#log4j.properties
+
+yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+#hdfs ha properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+namenode_id = None
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenode_ids.split(","):
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
+
+#log4j.properties
+rca_property_map = {
+  'ambari.jobhistory.database': ambari_db_rca_url,
+  'ambari.jobhistory.driver': ambari_db_rca_driver,
+  'ambari.jobhistory.user': ambari_db_rca_username,
+  'ambari.jobhistory.password': ambari_db_rca_password,
+  'ambari.jobhistory.logger': 'DEBUG,JHA',
+
+  'log4j.appender.JHA': 'org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender',
+  'log4j.appender.JHA.database': '${ambari.jobhistory.database}',
+  'log4j.appender.JHA.driver': '${ambari.jobhistory.driver}',
+  'log4j.appender.JHA.user': '${ambari.jobhistory.user}',
+  'log4j.appender.JHA.password': '${ambari.jobhistory.password}',
+
+  'log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': '${ambari.jobhistory.logger}',
+  'log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': 'true'
+}
+
+if 'hdfs-log4j' in config['configurations']:
+  # take a copy so that merging in mapreduce-log4j does not mutate the shared config
+  log4j_props = dict(config['configurations']['hdfs-log4j'])
+  if 'mapreduce-log4j' in config['configurations']:
+    log4j_props.update(config['configurations']['mapreduce-log4j'])
+else:
+  log4j_props = None
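
For reference, params.py leans heavily on resource_management's default()
helper, which returns a fallback value instead of raising KeyError when a
config path is absent. A simplified sketch of its lookup behaviour
(illustrative only, not the library implementation):

    def default_sketch(config, path, default_value=None):
      """Walk a '/'-separated path through the nested config dict."""
      node = config
      for segment in path.strip("/").split("/"):
        if not isinstance(node, dict) or segment not in node:
          return default_value
        node = node[segment]
      return node

    # e.g. on a cluster without a ResourceManager:
    # default_sketch(config, "/clusterHostInfo/rm_host", [])  ->  []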

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py
new file mode 100644
index 0000000..d6521ee
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,321 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+def setup_java():
+  """
+  Installs the JDK using parameters that come from ambari-server
+  """
+  import params
+
+  if not params.jdk_name:
+    return
+
+  jdk_curl_target = format("{artifact_dir}/{jdk_name}")
+  java_dir = os.path.dirname(params.java_home)
+  java_exec = format("{java_home}/bin/java")
+  
+  Execute(format("mkdir -p {artifact_dir} ; curl -kf --retry 10 {jdk_location}/{jdk_name} -o {jdk_curl_target}"),
+          path = ["/bin","/usr/bin/"],
+          not_if = format("test -e {java_exec}"))
+
+  if params.jdk_name.endswith(".bin"):
+    install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | {jdk_curl_target} -noregister > /dev/null 2>&1")
+  elif params.jdk_name.endswith(".gz"):
+    install_cmd = format("mkdir -p {java_dir} ; cd {java_dir} ; tar -xf {jdk_curl_target} > /dev/null 2>&1")
+  
+  Execute(install_cmd,
+          path = ["/bin","/usr/bin/"],
+          not_if = format("test -e {java_exec}")
+  )
+  jce_curl_target = format("{artifact_dir}/{jce_policy_zip}")
+  download_jce = format("mkdir -p {artifact_dir}; curl -kf --retry 10 {jce_location}/{jce_policy_zip} -o {jce_curl_target}")
+  Execute( download_jce,
+        path = ["/bin","/usr/bin/"],
+        not_if =format("test -e {jce_curl_target}"),
+        ignore_failures = True
+  )
+  
+  if params.security_enabled:
+    security_dir = format("{java_home}/jre/lib/security")
+    extract_cmd = format("rm -f local_policy.jar; rm -f US_export_policy.jar; unzip -o -j -q {jce_curl_target}")
+    Execute(extract_cmd,
+          only_if = format("test -e {security_dir} && test -f {jce_curl_target}"),
+          cwd  = security_dir,
+          path = ['/bin/','/usr/bin']
+    )
+
+def setup_hadoop():
+  """
+  Sets up Hadoop configuration files and directories
+  """
+  import params
+
+  File(os.path.join(params.snmp_conf_dir, 'snmpd.conf'),
+       content=Template("snmpd.conf.j2"))
+  Service("snmpd",
+          action = "restart")
+
+  Execute("/bin/echo 0 > /selinux/enforce",
+          only_if="test -f /selinux/enforce"
+  )
+
+  install_snappy()
+
+  #directories
+  Directory(params.hadoop_conf_dir,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+  Directory(params.hdfs_log_dir_prefix,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+  Directory(params.hadoop_pid_dir_prefix,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+
+  #files
+  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hdfs.conf.j2")
+  )
+  if params.security_enabled:
+    File(os.path.join(params.hadoop_bin, "task-controller"),
+         owner="root",
+         group=params.mapred_tt_group,
+         mode=06050
+    )
+    tc_mode = 0644
+    tc_owner = "root"
+  else:
+    tc_mode = None
+    tc_owner = params.hdfs_user
+
+  if tc_mode:
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner = tc_owner,
+         mode = tc_mode,
+         group = params.mapred_tt_group,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  else:
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner=tc_owner,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  for conf_file in ['hadoop-env.sh', 'commons-logging.properties', 'slaves']:
+    File(os.path.join(params.hadoop_conf_dir, conf_file),
+         owner=tc_owner,
+         content=Template(conf_file + ".j2")
+    )
+
+  health_check_template = "health_check" #for stack 1 use 'health_check'
+  File(os.path.join(params.hadoop_conf_dir, "health_check"),
+       owner=tc_owner,
+       content=Template(health_check_template + ".j2")
+  )
+
+  log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+  if params.log4j_props is not None:
+    PropertiesFile(log4j_filename,
+                   properties=params.log4j_props,
+                   mode=0664,
+                   owner=params.hdfs_user,
+                   group=params.user_group,
+    )
+  elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+    File(log4j_filename,
+         mode=0644,
+         group=params.user_group,
+         owner=params.hdfs_user,
+    )
+
+  update_log4j_props(log4j_filename)
+
+  File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+       owner=params.hdfs_user,
+       content=Template("hadoop-metrics2.properties.j2")
+  )
+
+  db_driver_dload_cmd = ""
+  if params.server_db_name == 'oracle' and params.oracle_driver_url != "":
+    db_driver_dload_cmd = format(
+      "curl -kf --retry 5 {oracle_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
+  elif params.server_db_name == 'mysql' and params.mysql_driver_url != "":
+    db_driver_dload_cmd = format(
+      "curl -kf --retry 5 {mysql_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
+
+  if db_driver_dload_cmd:
+    Execute(db_driver_dload_cmd,
+            not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}")
+    )
+
+
+def setup_configs():
+  """
+  Creates config files for the HDFS and MapReduce services
+  """
+  import params
+
+  if "mapred-queue-acls" in params.config['configurations']:
+    XmlConfig("mapred-queue-acls.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations'][
+                'mapred-queue-acls'],
+              owner=params.mapred_user,
+              group=params.user_group
+    )
+  elif os.path.exists(
+      os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml")):
+    File(os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml"),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  if "hadoop-policy" in params.config['configurations']:
+    XmlConfig("hadoop-policy.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['hadoop-policy'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  if "mapred-site" in params.config['configurations']:
+    XmlConfig("mapred-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['mapred-site'],
+              owner=params.mapred_user,
+              group=params.user_group
+    )
+
+  File(params.task_log4j_properties_location,
+       content=StaticFile("task-log4j.properties"),
+       mode=0755
+  )
+
+  if "capacity-scheduler" in params.config['configurations']:
+    XmlConfig("capacity-scheduler.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations'][
+                'capacity-scheduler'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  # if params.stack_version[0] == "1":
+  Link('/usr/lib/hadoop/lib/hadoop-tools.jar',
+       to = '/usr/lib/hadoop/hadoop-tools.jar'
+  )
+
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+    File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml')):
+    File(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+    File(os.path.join(params.hadoop_conf_dir, 'masters'),
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+  if os.path.exists(
+      os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+  if os.path.exists(
+      os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  # generate_include_file()
+
+def update_log4j_props(file_path):
+  import params
+
+  # Toggle each RCA property in place: comment it out with the '###' prefix
+  # when RCA is disabled, or pin it to the current value when enabled.
+  for key in params.rca_property_map:
+    value = params.rca_property_map[key]
+    Execute(format(
+      "sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file_path}")
+    )
+
+
+def generate_include_file():
+  import params
+
+  if params.dfs_hosts and params.has_slaves:
+    include_hosts_list = params.slave_hosts
+    File(params.dfs_hosts,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+
+def install_snappy():
+  import params
+
+  snappy_so = "libsnappy.so"
+  so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
+  so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
+  so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
+  so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
+  so_src_dir_x86 = format("{hadoop_home}/lib")
+  so_src_dir_x64 = format("{hadoop_home}/lib64")
+  so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
+  so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
+  Execute(
+    format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
+  Execute(
+    format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/commons-logging.properties.j2
new file mode 100644
index 0000000..77e458f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..bb5795b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
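
No writer for exclude_hosts_list.j2 appears in this excerpt. A hypothetical
counterpart to generate_include_file() from shared_initialization.py above
(params.exclude_file_path and the hdfs_exclude_file variable are assumed
names, not part of this commit):

    def generate_exclude_file():
      import params

      # dfs.hosts.exclude would point at the rendered file
      File(params.exclude_file_path,
           content=Template("exclude_hosts_list.j2"),
           owner=params.hdfs_user,
           group=params.user_group
      )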

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-env.sh.j2
new file mode 100644
index 0000000..51e2bac
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-env.sh.j2
@@ -0,0 +1,121 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+# this is different for HDP1 #
+# Path to jsvc required by secure HDP 2.0 datanode
+# export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100644
index 0000000..a6a66ef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}
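
The whole Ganglia block above renders empty when has_ganglia_server is False.
To preview the output outside of Ambari, the template can be rendered with
plain jinja2 (a testing aid only; Ambari's Template() resolves these names
from the params module instead):

    from jinja2 import Template as J2Template

    with open("hadoop-metrics2.properties.j2") as f:
      tpl = J2Template(f.read())

    print tpl.render(has_ganglia_server=True,
                     ganglia_server_host="ganglia.example.com")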

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hdfs.conf.j2
new file mode 100644
index 0000000..ca7baa2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hdfs.conf.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hdfs_user}}   - nofile 32768
+{{hdfs_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check-v2.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check-v2.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check-v2.j2
new file mode 100644
index 0000000..cb7b12b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check-v2.j2
@@ -0,0 +1,91 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+      up=i;
+      }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check.j2
new file mode 100644
index 0000000..b84b336
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check.j2
@@ -0,0 +1,118 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_taskcontroller {
+  if [ "<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>" == "true" ]; then
+    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
+    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
+      echo "taskcontroller ok"
+    else
+      echo 'check taskcontroller' ; exit 1
+    fi
+  fi
+}
+
+function check_jetty {
+  hname=`hostname`
+  jmx=`curl -s -S -m 5 "http://$hname:{{tasktracker_port}}/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
+  if [ $? -eq 0 ] ; then
+    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
+    e=${e:-0} # no jmx servlet ?
+    if [ $e -gt 10 ] ; then
+      echo "check jetty: shuffle_exceptions=$e" ; exit 1
+    else
+      echo "jetty ok"
+    fi
+  else
+    echo "check jetty: ping failed" ; exit 1
+  fi
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+      up=i;
+      }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+for check in disks taskcontroller jetty; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0
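
The taskcontroller and jetty checks above reference security_enabled,
task_bin_exe and tasktracker_port via Jinja2 (the leftover Puppet/ERB scope
lookups do not work once the template is rendered by Python code).
security_enabled is already defined in params.py; the other two do not appear
in this excerpt. A hypothetical addition to params.py (the property name and
default TaskTracker HTTP port are assumptions, not taken from this commit):

    task_bin_exe = os.path.join(hadoop_bin, "task-controller")
    tasktracker_port = default("/configurations/mapred-site/mapred.task.tracker.http.address",
                               "0.0.0.0:50060").split(":")[-1]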

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/include_hosts_list.j2
new file mode 100644
index 0000000..cbcf6c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/slaves.j2
new file mode 100644
index 0000000..cbcf6c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/slaves.j2
@@ -0,0 +1,3 @@
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/snmpd.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/snmpd.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/snmpd.conf.j2
new file mode 100644
index 0000000..3530444
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/snmpd.conf.j2
@@ -0,0 +1,48 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+com2sec notConfigUser  {{snmp_source}}   {{snmp_community}}
+group   notConfigGroup v1           notConfigUser
+group   notConfigGroup v2c           notConfigUser
+view    systemview    included   .1
+access  notConfigGroup ""      any       noauth    exact  systemview none none
+
+syslocation Hadoop 
+syscontact HadoopMaster 
+dontLogTCPWrappersConnects yes
+
+###############################################################################
+# disk checks
+
+disk / 10000
+
+
+###############################################################################
+# load average checks
+#
+
+# load [1MAX=12.0] [5MAX=12.0] [15MAX=12.0]
+#
+# 1MAX:   If the 1 minute load average is above this limit at query
+#         time, the errorFlag will be set.
+# 5MAX:   Similar, but for 5 min average.
+# 15MAX:  Similar, but for 15 min average.
+
+# Check for loads:
+#load 12 14 14
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/taskcontroller.cfg.j2
new file mode 100644
index 0000000..d01d37e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/taskcontroller.cfg.j2
@@ -0,0 +1,20 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+mapred.local.dir={{mapred_local_dir}}
+mapreduce.tasktracker.group={{mapred_tt_group}}
+hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}
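
taskcontroller.cfg pairs with the setuid task-controller binary that
setup_hadoop() above chmods to 06050 under params.hadoop_bin. A quick
standalone sanity check of the installed binary (the path is assumed from the
defaults in params.py):

    import os, stat

    st = os.stat("/usr/lib/hadoop/bin/task-controller")
    assert stat.S_IMODE(st.st_mode) == 06050, \
        "task-controller must be setuid/setgid (6050)"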

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/FLUME/metainfo.xml
index 185f685..bebb54e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/FLUME/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/FLUME/metainfo.xml
@@ -18,12 +18,13 @@
 <metainfo>
     <user>root</user>
     <comment>Flume is a distributed, reliable, and available system for efficiently collecting, aggregating and moving large amounts of log data from many different sources to a centralized data store.</comment>
-    <version>1.3.1.1.3.2.0</version>
+    <version>1.3.1.1.3.3.0</version>
 
     <components>
         <component>
             <name>FLUME_SERVER</name>
             <category>MASTER</category>
+            <cardinality>1</cardinality>
         </component>
     </components>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml
index 1a895b8..36381ce 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml
@@ -16,29 +16,104 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.5.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>GANGLIA</name>
+      <comment>Ganglia Metrics Collection system</comment>
+      <version>3.5.0</version>
+      <components>
         <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
+          <name>GANGLIA_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/ganglia_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
+          <name>GANGLIA_MONITOR</name>
+          <category>SLAVE</category>
+          <cardinality>ALL</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/ganglia_monitor.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-
-  <configuration-dependencies>
-    <config-type>global</config-type>
-  </configuration-dependencies>
-
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>libganglia-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-devel-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmetad-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-web-3.5.7-99.noarch</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>python-rrdtool.x86_64</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmond-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmond-modules-python-3.5.0-99</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <package>
+            <type>rpm</type>
+            <name>apache2</name>
+          </package>
+          <package>
+            <type>rpm</type>
+            <name>apache2-mod_php5</name>
+          </package>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos5</osType>
+          <package>
+            <type>rpm</type>
+            <name>httpd</name>
+          </package>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos6</osType>
+          <package>
+            <type>rpm</type>
+            <name>httpd</name>
+          </package>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmetad.sh
new file mode 100644
index 0000000..e60eb31
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmetad.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# Before checking gmetad, check rrdcached.
+./checkRrdcached.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+if [ -n "${gmetadRunningPid}" ]
+then
+  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
+else
+  echo "Failed to find running ${GMETAD_BIN}";
+  exit 1;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmond.sh
new file mode 100644
index 0000000..0cec8dc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmond.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function checkGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+    # Skip over (purported) Clusters that don't have their core conf file present.
+    if [ -e "${gmondCoreConfFileName}" ]
+    then 
+      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+      if [ -n "${gmondRunningPid}" ]
+      then
+        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
+      else
+        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
+        exit 1;
+      fi
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so check
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        checkGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just check the one ${gmondClusterName} that was asked for.
+    checkGmondForCluster ${gmondClusterName};
+fi
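
Note the two invocation modes: given a cluster name argument, only that
cluster's gmond is checked; with no argument, every cluster reported by
getConfiguredGangliaClusterNames is swept, and the first cluster found down
aborts the sweep with exit status 1. A quick sketch (the cluster name is
illustrative):

    # Check one cluster's gmond:
    ./checkGmond.sh HDPSlaves;

    # Check every configured cluster:
    ./checkGmond.sh;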

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkRrdcached.sh
new file mode 100644
index 0000000..d94db5d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkRrdcached.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+if [ -n "${rrdcachedRunningPid}" ]
+then
+  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
+else
+  echo "Failed to find running ${RRDCACHED_BIN}";
+  exit 1;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetad.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetad.init b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetad.init
new file mode 100644
index 0000000..20b388e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetad.init
@@ -0,0 +1,73 @@
+#!/bin/sh
+# chkconfig: 2345 70 40
+# description: hdp-gmetad startup script
+# processname: hdp-gmetad
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in sync with the definition of
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANGLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
+HDP_GANGLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
+HDP_GANGLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
+
+RETVAL=0
+
+case "$1" in
+  start)
+      echo "============================="
+      echo "Starting hdp-gmetad..."
+      echo "============================="
+      [ -f ${HDP_GANGLIA_GMETAD_STARTER} ] || exit 1
+      eval "${HDP_GANGLIA_GMETAD_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmetad..."
+      echo "=================================="
+      [ -f ${HDP_GANGLIA_GMETAD_STOPPER} ] || exit 1
+      eval "${HDP_GANGLIA_GMETAD_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
+      ;;
+
+  restart|reload)
+      $0 stop
+      $0 start
+      RETVAL=$?
+      ;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmetad..."
+      echo "======================================="
+      [ -f ${HDP_GANGLIA_GMETAD_CHECKER} ] || exit 1
+      eval "${HDP_GANGLIA_GMETAD_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+      echo "Usage: $0 {start|stop|restart|reload|status}"
+      exit 1
+      ;;
+esac
+
+exit $RETVAL
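
The chkconfig header above (runlevels 2345, start priority 70, stop priority
40) marks this as a SysV init script; the hdp-gmond script below follows the
same pattern. A sketch of the expected installation on a RHEL-style host
(the paths and commands are the usual SysV ones, not taken from this patch):

    cp gmetad.init /etc/init.d/hdp-gmetad
    chmod 755 /etc/init.d/hdp-gmetad
    chkconfig --add hdp-gmetad
    service hdp-gmetad start
    service hdp-gmetad status   # runs checkGmetad.sh under the hood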

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetadLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetadLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetadLib.sh
new file mode 100644
index 0000000..e28610e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetadLib.sh
@@ -0,0 +1,204 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMETAD_BIN=/usr/sbin/gmetad;
+GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
+GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
+
+function getGmetadLoggedPid()
+{
+    if [ -e "${GMETAD_PID_FILE}" ]
+    then
+        echo `cat ${GMETAD_PID_FILE}`;
+    fi
+}
+
+function getGmetadRunningPid()
+{
+    gmetadLoggedPid=`getGmetadLoggedPid`;
+
+    if [ -n "${gmetadLoggedPid}" ]
+    then
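+        # 'ps -o pid=MYPID' renames the PID column header to MYPID; tail and
+        # grep -v then strip the header, so a PID is printed only if the
+        # process is still alive.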
+        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmetadConf()
+{
+    now=`date`;
+
+    cat <<END_OF_GMETAD_CONF_1
+#################### Generated by ${0} on ${now} ####################
+#
+#-------------------------------------------------------------------------------
+# Setting the debug_level to 1 will keep the daemon in the foreground and
+# show only error messages. Setting this value higher than 1 will make 
+# gmetad output debugging information and stay in the foreground.
+# default: 0
+# debug_level 10
+#
+#-------------------------------------------------------------------------------
+# What to monitor. The most important section of this file. 
+#
+# The data_source tag specifies either a cluster or a grid to
+# monitor. If we detect the source is a cluster, we will maintain a complete
+# set of RRD databases for it, which can be used to create historical 
+# graphs of the metrics. If the source is a grid (it comes from another gmetad),
+# we will only maintain summary RRDs for it.
+#
+# Format: 
+# data_source "my cluster" [polling interval] address1:port address2:port ...
+# 
+# The keyword 'data_source' must immediately be followed by a unique
+# string which identifies the source, then an optional polling interval in 
+# seconds. The source will be polled at this interval on average. 
+# If the polling interval is omitted, 15sec is assumed.
+#
+# If you choose to set the polling interval to something other than the default,
+# note that the web frontend determines a host as down if its TN value
+# exceeds 4 * TMAX (TMAX is 20sec by default, i.e. 80sec).  Therefore, if
+# you set the polling interval to something around or greater than 80sec,
+# this will cause the frontend to incorrectly display hosts as down even
+# though they are not.
+#
+# A list of machines which service the data source follows, in the 
+# format ip:port, or name:port. If a port is not specified then 8649
+# (the default gmond port) is assumed.
+# default: There is no default value
+#
+# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
+# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
+# data_source "another source" 1.3.4.7:8655  1.3.4.8
+END_OF_GMETAD_CONF_1
+
+    # Get info about all the configured Ganglia clusters.
+    getGangliaClusterInfo | while read gangliaClusterInfoLine
+    do
+        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
+        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
+        # ...and generate a corresponding data_source line for gmetad.conf. 
+        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
+    done
+
+    cat <<END_OF_GMETAD_CONF_2
+#
+# Round-Robin Archives
+# You can specify custom Round-Robin archives here (defaults are listed below)
+#
+# Old Default RRA: Keep 1 hour of metrics at 15 second resolution, 1 day at 6 minute resolution
+# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+#      "RRA:AVERAGE:0.5:5760:374"
+# New Default RRA
+# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
+# Two weeks of data points at 1 minute resolution (average)
+#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
+# Retaining existing resolution
+RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+     "RRA:AVERAGE:0.5:5760:374"
+#
+#-------------------------------------------------------------------------------
+# Scalability mode. If on, we summarize over downstream grids, and respect
+# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
+# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
+# we are the "authority" on data source feeds. This approach does not scale to
+# large groups of clusters, but is provided for backwards compatibility.
+# default: on
+# scalable off
+#
+#-------------------------------------------------------------------------------
+# The name of this Grid. All the data sources above will be wrapped in a GRID
+# tag with this name.
+# default: unspecified
+gridname "HDP_GRID"
+#
+#-------------------------------------------------------------------------------
+# The authority URL for this grid. Used by other gmetads to locate graphs
+# for our data sources. Generally points to a ganglia/
+# website on this machine.
+# default: "http://hostname/ganglia/",
+#   where hostname is the name of this machine, as defined by gethostname().
+# authority "http://mycluster.org/newprefix/"
+#
+#-------------------------------------------------------------------------------
+# List of machines this gmetad will share XML with. Localhost
+# is always trusted. 
+# default: There is no default value
+# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
+#
+#-------------------------------------------------------------------------------
+# If you want any host which connects to the gmetad XML to receive
+# data, then set this value to "on"
+# default: off
+# all_trusted on
+#
+#-------------------------------------------------------------------------------
+# If you don't want gmetad to setuid then set this to off
+# default: on
+# setuid off
+#
+#-------------------------------------------------------------------------------
+# User gmetad will setuid to (defaults to "nobody")
+# default: "nobody"
+setuid_username "${GMETAD_USER}"
+#
+#-------------------------------------------------------------------------------
+# Umask to apply to created rrd files and grid directory structure
+# default: 0 (files are public)
+# umask 022
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer requests for XML
+# default: 8651
+# xml_port 8651
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer queries for XML. This facility allows
+# simple subtree and summation views of the XML tree.
+# default: 8652
+# interactive_port 8652
+#
+#-------------------------------------------------------------------------------
+# The number of threads answering XML requests
+# default: 4
+# server_threads 10
+#
+#-------------------------------------------------------------------------------
+# Where gmetad stores its round-robin databases
+# default: "/var/lib/ganglia/rrds"
+# rrd_rootdir "/some/other/place"
+#
+#-------------------------------------------------------------------------------
+# In earlier versions of gmetad, hostnames were handled in a
+# case-sensitive manner.
+# If your hostname directories have been renamed to lower case,
+# set this option to 0 to disable backward compatibility.
+# From version 3.2, backwards compatibility will be disabled by default.
+# default: 1   (for gmetad < 3.2)
+# default: 0   (for gmetad >= 3.2)
+case_sensitive_hostnames 1
+END_OF_GMETAD_CONF_2
+}
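
generateGmetadConf depends on getGangliaClusterInfo (defined in
gangliaLib.sh, which is not part of this hunk) emitting one
whitespace-separated record per cluster; the read builtin in the loop above
splits each record into a name, a master IP and a port. Under that
assumption, records such as (values are illustrative):

    HDPSlaves 10.0.0.5 8660
    HDPNameNode 10.0.0.6 8661

would be rendered into gmetad.conf as:

    data_source "HDPSlaves" 10.0.0.5:8660
    data_source "HDPNameNode" 10.0.0.6:8661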

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmond.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmond.init b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmond.init
new file mode 100644
index 0000000..afb7026
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmond.init
@@ -0,0 +1,73 @@
+#!/bin/sh
+# chkconfig: 2345 70 40
+# description: hdp-gmond startup script
+# processname: hdp-gmond
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in sync with the definition of
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANGLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
+HDP_GANGLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
+HDP_GANGLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
+
+RETVAL=0
+
+case "$1" in
+  start)
+      echo "============================="
+      echo "Starting hdp-gmond..."
+      echo "============================="
+      [ -f ${HDP_GANGLIA_GMOND_STARTER} ] || exit 1
+      eval "${HDP_GANGLIA_GMOND_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmond..."
+      echo "=================================="
+      [ -f ${HDP_GANGLIA_GMOND_STOPPER} ] || exit 1
+      eval "${HDP_GANGLIA_GMOND_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
+      ;;
+
+  restart|reload)
+      $0 stop
+      $0 start
+      RETVAL=$?
+      ;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmond..."
+      echo "======================================="
+      [ -f ${HDP_GANGLIA_GMOND_CHECKER} ] || exit 1
+      eval "${HDP_GANGLIA_GMOND_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+      echo "Usage: $0 {start|stop|restart|reload|status}"
+      exit 1
+      ;;
+esac
+
+exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmondLib.sh
new file mode 100644
index 0000000..87da4dd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmondLib.sh
@@ -0,0 +1,545 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMOND_BIN=/usr/sbin/gmond;
+GMOND_CORE_CONF_FILE=gmond.core.conf;
+GMOND_MASTER_CONF_FILE=gmond.master.conf;
+GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
+GMOND_PID_FILE=gmond.pid;
+
+# Functions.
+function getGmondCoreConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
+    fi
+}
+
+function getGmondMasterConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    fi
+}
+
+function getGmondSlaveConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    fi
+}
+
+function getGmondPidFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
+    else
+        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
+    fi
+}
+
+function getGmondLoggedPid()
+{
+    gmondPidFile=`getGmondPidFileName ${1}`;
+
+    if [ -e "${gmondPidFile}" ]
+    then
+        echo `cat ${gmondPidFile}`;
+    fi
+}
+
+function getGmondRunningPid()
+{
+    gmondLoggedPid=`getGmondLoggedPid ${1}`;
+
+    if [ -n "${gmondLoggedPid}" ]
+    then
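+        # Same idiom as in gmetadLib.sh: the MYPID header row is filtered
+        # out, so a PID is printed only if the logged process is still alive.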
+        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmondCoreConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_CORE_CONF
+#################### Generated by ${0} on ${now} ####################
+#
+/* This configuration is as close to 2.5.x default behavior as possible.
+   The values closely match ./gmond/metric.h definitions in 2.5.x. */
+globals {
+  daemonize = yes
+  setuid = yes
+  user = ${GMOND_USER}
+  debug_level = 0
+  max_udp_msg_len = 1472
+  mute = no
+  deaf = no 
+  allow_extra_data = yes
+  host_dmax = 0 /*secs */
+  host_tmax = 20 /*secs */
+  cleanup_threshold = 300 /*secs */
+  gexec = no
+  send_metadata_interval = 30 /*secs */
+}
+
+/*
+ * The cluster attributes specified will be used as part of the <CLUSTER>
+ * tag that will wrap all hosts collected by this instance.
+ */
+cluster {
+  name = "${gmondClusterName}"
+  owner = "unspecified"
+  latlong = "unspecified"
+  url = "unspecified"
+}
+
+/* The host section describes attributes of the host, like the location */
+host {
+  location = "unspecified"
+}
+
+/* You can specify as many tcp_accept_channels as you like to share
+ * an XML description of the state of the cluster.
+ *
+ * At the very least, every gmond must expose its XML state to 
+ * queriers from localhost.
+ */
+tcp_accept_channel {
+  bind = localhost
+  port = ${gmondPort}
+}
+
+/* Each metrics module that is referenced by gmond must be specified and
+   loaded. If the module has been statically linked with gmond, it does
+   not require a load path. However all dynamically loadable modules must
+   include a load path. */
+modules {
+  module {
+    name = "core_metrics"
+  }
+  module {
+    name = "cpu_module"
+    path = "modcpu.so"
+  }
+  module {
+    name = "disk_module"
+    path = "moddisk.so"
+  }
+  module {
+    name = "load_module"
+    path = "modload.so"
+  }
+  module {
+    name = "mem_module"
+    path = "modmem.so"
+  }
+  module {
+    name = "net_module"
+    path = "modnet.so"
+  }
+  module {
+    name = "proc_module"
+    path = "modproc.so"
+  }
+  module {
+    name = "sys_module"
+    path = "modsys.so"
+  }
+}
+
+/* The old internal 2.5.x metric array has been replaced by the following
+   collection_group directives.  What follows is the default behavior for
+   collecting and sending metrics that is as close to 2.5.x behavior as
+   possible. */
+
+/* This collection group will cause a heartbeat (or beacon) to be sent every
+   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
+   the age of the running gmond. */
+collection_group {
+  collect_once = yes
+  time_threshold = 20
+  metric {
+    name = "heartbeat"
+  }
+}
+
+/* This collection group will send general info about this host's total
+   memory every 180 secs.
+   This information doesn't change between reboots and is only collected
+   once. It is needed for the heatmap display. */
+collection_group {
+  collect_once = yes
+  time_threshold = 180
+  metric {
+    name = "mem_total"
+    title = "Memory Total"
+  }
+}
+
+/* This collection group will send general info about this host every
+   1200 secs.
+   This information doesn't change between reboots and is only collected
+   once. */
+collection_group {
+  collect_once = yes
+  time_threshold = 1200
+  metric {
+    name = "cpu_num"
+    title = "CPU Count"
+  }
+  metric {
+    name = "cpu_speed"
+    title = "CPU Speed"
+  }
+  /* Should this be here? Swap can be added/removed between reboots. */
+  metric {
+    name = "swap_total"
+    title = "Swap Space Total"
+  }
+  metric {
+    name = "boottime"
+    title = "Last Boot Time"
+  }
+  metric {
+    name = "machine_type"
+    title = "Machine Type"
+  }
+  metric {
+    name = "os_name"
+    title = "Operating System"
+  }
+  metric {
+    name = "os_release"
+    title = "Operating System Release"
+  }
+  metric {
+    name = "location"
+    title = "Location"
+  }
+}
+
+/* This collection group will send the status of gexecd for this host
+   every 300 secs.*/
+/* Unlike 2.5.x, the default behavior is to report gexecd OFF. */
+collection_group {
+  collect_once = yes
+  time_threshold = 300
+  metric {
+    name = "gexec"
+    title = "Gexec Status"
+  }
+}
+
+/* This collection group will collect the CPU status info every 20 secs.
+   The time threshold is set to 90 seconds.  In honesty, this
+   time_threshold could be set significantly higher to reduce
+   unnecessary network chatter. */
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* CPU status */
+  metric {
+    name = "cpu_user"
+    value_threshold = "1.0"
+    title = "CPU User"
+  }
+  metric {
+    name = "cpu_system"
+    value_threshold = "1.0"
+    title = "CPU System"
+  }
+  metric {
+    name = "cpu_idle"
+    value_threshold = "5.0"
+    title = "CPU Idle"
+  }
+  metric {
+    name = "cpu_nice"
+    value_threshold = "1.0"
+    title = "CPU Nice"
+  }
+  metric {
+    name = "cpu_aidle"
+    value_threshold = "5.0"
+    title = "CPU aidle"
+  }
+  metric {
+    name = "cpu_wio"
+    value_threshold = "1.0"
+    title = "CPU wio"
+  }
+  /* The next two metrics are optional if you want more detail...
+     ... since they are accounted for in cpu_system.
+  metric {
+    name = "cpu_intr"
+    value_threshold = "1.0"
+    title = "CPU intr"
+  }
+  metric {
+    name = "cpu_sintr"
+    value_threshold = "1.0"
+    title = "CPU sintr"
+  }
+  */
+}
+
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* Load Averages */
+  metric {
+    name = "load_one"
+    value_threshold = "1.0"
+    title = "One Minute Load Average"
+  }
+  metric {
+    name = "load_five"
+    value_threshold = "1.0"
+    title = "Five Minute Load Average"
+  }
+  metric {
+    name = "load_fifteen"
+    value_threshold = "1.0"
+    title = "Fifteen Minute Load Average"
+  }
+}
+
+/* This group collects the number of running and total processes */
+collection_group {
+  collect_every = 80
+  time_threshold = 950
+  metric {
+    name = "proc_run"
+    value_threshold = "1.0"
+    title = "Total Running Processes"
+  }
+  metric {
+    name = "proc_total"
+    value_threshold = "1.0"
+    title = "Total Processes"
+  }
+}
+
+/* This collection group grabs the volatile memory metrics every 40 secs and
+   sends them at least every 180 secs.  This time_threshold can be increased
+   significantly to reduce unneeded network traffic. */
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "mem_free"
+    value_threshold = "1024.0"
+    title = "Free Memory"
+  }
+  metric {
+    name = "mem_shared"
+    value_threshold = "1024.0"
+    title = "Shared Memory"
+  }
+  metric {
+    name = "mem_buffers"
+    value_threshold = "1024.0"
+    title = "Memory Buffers"
+  }
+  metric {
+    name = "mem_cached"
+    value_threshold = "1024.0"
+    title = "Cached Memory"
+  }
+  metric {
+    name = "swap_free"
+    value_threshold = "1024.0"
+    title = "Free Swap Space"
+  }
+}
+
+collection_group {
+  collect_every = 40
+  time_threshold = 300
+  metric {
+    name = "bytes_out"
+    value_threshold = 4096
+    title = "Bytes Sent"
+  }
+  metric {
+    name = "bytes_in"
+    value_threshold = 4096
+    title = "Bytes Received"
+  }
+  metric {
+    name = "pkts_in"
+    value_threshold = 256
+    title = "Packets Received"
+  }
+  metric {
+    name = "pkts_out"
+    value_threshold = 256
+    title = "Packets Sent"
+  }
+}
+
+
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "disk_free"
+    value_threshold = 1.0
+    title = "Disk Space Available"
+  }
+  metric {
+    name = "part_max_used"
+    value_threshold = 1.0
+    title = "Maximum Disk Space Used"
+  }
+  metric {
+    name = "disk_total"
+    value_threshold = 1.0
+    title = "Total Disk Space"
+  }
+}
+
+udp_recv_channel {
+    port = 0
+}
+
+
+include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
+END_OF_GMOND_CORE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondMasterConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_MASTER_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Masters only receive; they never send. */
+udp_recv_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+
+/* The gmond cluster master must additionally provide an XML 
+ * description of the cluster to the gmetad that will query it.
+ */
+tcp_accept_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+END_OF_GMOND_MASTER_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondSlaveConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_SLAVE_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Slaves only send; they never receive. */
+udp_send_channel {
+  #bind_hostname = yes # Highly recommended, soon to be default.
+                       # This option tells gmond to use a source address
+                       # that resolves to the machine's hostname.  Without
+                       # this, the metrics may appear to come from any
+                       # interface and the DNS names associated with
+                       # those IPs will be used to create the RRDs.
+  host = ${gmondMasterIP}
+  port = ${gmondPort}
+  ttl = 1
+}
+END_OF_GMOND_SLAVE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
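
The three generators write the rendered conf to stdout and report bad input
through return codes (1 when no cluster name is given, 2 when the cluster
cannot be resolved), so a caller such as startGmond.sh (not shown in this
patch) would presumably pair them with the file-name getters above. A hedged
sketch:

    clusterName="HDPSlaves";   # illustrative

    coreConf=`getGmondCoreConfFileName ${clusterName}`;
    generateGmondCoreConf ${clusterName} > ${coreConf} || exit 1;

    # Only the node polled by gmetad needs the master conf:
    masterConf=`getGmondMasterConfFileName ${clusterName}`;
    generateGmondMasterConf ${clusterName} > ${masterConf} || exit 1;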