Posted to commits@ambari.apache.org by sw...@apache.org on 2016/03/17 07:31:46 UTC
ambari git commit: AMBARI-15448. Report AMS / Grafana Per Disk Metrics and other changes. (swagle)
Repository: ambari
Updated Branches:
refs/heads/branch-2.2 2b1e7b75d -> 395bca848
AMBARI-15448. Report AMS / Grafana Per Disk Metrics and other changes. (swagle)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/395bca84
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/395bca84
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/395bca84
Branch: refs/heads/branch-2.2
Commit: 395bca848fc8e502b8174bacd84dc24562f8d794
Parents: 2b1e7b7
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Wed Mar 16 18:16:04 2016 -0700
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Wed Mar 16 18:16:04 2016 -0700
----------------------------------------------------------------------
.../src/main/python/core/config_reader.py | 8 +-
.../src/main/python/core/controller.py | 21 ++--
.../src/main/python/core/host_info.py | 110 +++++++++++--------
.../src/main/python/core/metric_collector.py | 6 +-
.../src/test/python/core/TestHostInfo.py | 77 +++++++++++--
.../package/scripts/metrics_grafana_util.py | 6 +-
.../package/templates/metric_monitor.ini.j2 | 2 +-
.../templates/hadoop-metrics2.properties.j2 | 5 +-
8 files changed, 154 insertions(+), 81 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/395bca84/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
index a053955..02f0ce3 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
@@ -163,7 +163,8 @@ class Configuration:
self.config.readfp(StringIO.StringIO(config_content))
pass
if os.path.exists(METRIC_FILE_PATH):
- self.metric_groups = json.load(open(METRIC_FILE_PATH))
+ with open(METRIC_FILE_PATH, 'r') as f:
+ self.metric_groups = json.load(f)
else:
print 'No metric configs found at {0}'.format(METRIC_FILE_PATH)
self.metric_groups = \
@@ -201,7 +202,7 @@ class Configuration:
return int(self.get("emitter", "send_interval", 60))
def get_collector_sleep_interval(self):
- return int(self.get("collector", "collector_sleep_interval", 5))
+ return int(self.get("collector", "collector_sleep_interval", 10))
def get_hostname_config(self):
return self.get("default", "hostname", None)
@@ -231,5 +232,4 @@ class Configuration:
return 6188
def get_ca_certs(self):
- return self._ca_cert_file_path
-
+ return self._ca_cert_file_path
\ No newline at end of file
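The first hunk swaps a bare json.load(open(...)) for a context manager. A minimal sketch of the pattern, with an illustrative path standing in for the real METRIC_FILE_PATH defined in config_reader.py:

import json
import os

METRIC_FILE_PATH = '/tmp/metric_groups.conf'  # illustrative path only

def load_metric_groups(path=METRIC_FILE_PATH):
    # The with-block closes the file handle even if json.load() raises,
    # unlike json.load(open(path)), which leaks the handle on error.
    if os.path.exists(path):
        with open(path, 'r') as f:
            return json.load(f)
    return {}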
http://git-wip-us.apache.org/repos/asf/ambari/blob/395bca84/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
index c04a61b..c2ee448 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
@@ -57,14 +57,14 @@ class Controller(threading.Thread):
self.start_emitter()
- # Wake every 5 seconds to push events to the queue
+ # Wake every 5 seconds to push events to the queue
while True:
if (self.event_queue.full()):
logger.warn('Event Queue full!! Suspending further collections.')
else:
self.enqueque_events()
pass
- #Wait for the service stop event instead of sleeping blindly
+ # Wait for the service stop event instead of sleeping blindly
if 0 == self._stop_handler.wait(self.sleep_interval):
logger.info('Shutting down Controller thread')
break
@@ -73,7 +73,8 @@ class Controller(threading.Thread):
self._t.cancel()
self._t.join(5)
- #The emitter thread should have stopped by now, just ensure it has shut down properly
+ # The emitter thread should have stopped by now, just ensure it has shut
+ # down properly
self.emitter.join(5)
pass
@@ -103,13 +104,13 @@ class Controller(threading.Thread):
pass
pass
- if process_metrics_groups:
- for name, properties in process_metrics_groups.iteritems():
- event = ProcessMetricCollectEvent(properties, name)
- logger.info('Adding event to cache, {0} : {1}'.format(name, properties))
- #self.events_cache.append(event)
- pass
- pass
+ # if process_metrics_groups:
+ # for name, properties in process_metrics_groups.iteritems():
+ # event = ProcessMetricCollectEvent(properties, name)
+ # logger.info('Adding event to cache, {0} : {1}'.format(name, properties))
+ # #self.events_cache.append(event)
+ # pass
+ # pass
pass
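The loop above waits on a stop handler instead of sleeping blindly, so shutdown takes effect immediately rather than after the full sleep interval. A minimal sketch of the pattern, using threading.Event as a stand-in for the project's _stop_handler:

import threading

class PollingLoop(object):
    def __init__(self, sleep_interval=10):
        self.sleep_interval = sleep_interval
        self._stop_event = threading.Event()

    def run(self):
        while True:
            self.collect()
            # wait() returns True as soon as the event is set, so a stop
            # request interrupts the sleep; until then it behaves like
            # time.sleep(sleep_interval).
            if self._stop_event.wait(self.sleep_interval):
                break

    def stop(self):
        self._stop_event.set()

    def collect(self):
        pass  # enqueue collection events here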
http://git-wip-us.apache.org/repos/asf/ambari/blob/395bca84/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
index 2e8c442..ccc1366 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
@@ -21,12 +21,11 @@ limitations under the License.
import logging
import psutil
import os
-from collections import namedtuple
import platform
import time
import threading
import socket
-import subprocess
+import operator
logger = logging.getLogger()
cached_hostname = None
@@ -111,7 +110,6 @@ class HostInfo():
mem_stats = psutil.virtual_memory()
swap_stats = psutil.swap_memory()
- disk_usage = self.get_combined_disk_usage()
mem_total = self.__host_static_info.get('mem_total')
swap_total = self.__host_static_info.get('swap_total')
@@ -119,17 +117,18 @@ class HostInfo():
return {
'mem_total': bytes2kilobytes(mem_total) if mem_total else 0,
- 'mem_used': bytes2kilobytes(mem_stats.used) if hasattr(mem_stats, 'used') else 0,
- 'mem_free': bytes2kilobytes(mem_stats.free) if hasattr(mem_stats, 'free') else 0,
+ 'mem_used': bytes2kilobytes(mem_stats.used - mem_stats.cached) if hasattr(mem_stats, 'used') and hasattr(mem_stats, 'cached') else 0, # Used memory w/o cached
+ 'mem_free': bytes2kilobytes(mem_stats.available) if hasattr(mem_stats, 'available') else 0, # the actual amount of available memory
'mem_shared': bytes2kilobytes(mem_stats.shared) if hasattr(mem_stats, 'shared') else 0,
'mem_buffered': bytes2kilobytes(mem_stats.buffers) if hasattr(mem_stats, 'buffers') else 0,
'mem_cached': bytes2kilobytes(mem_stats.cached) if hasattr(mem_stats, 'cached') else 0,
'swap_free': bytes2kilobytes(swap_stats.free) if hasattr(swap_stats, 'free') else 0,
+ 'swap_used': bytes2kilobytes(swap_stats.used) if hasattr(swap_stats, 'used') else 0,
'swap_total': bytes2kilobytes(swap_total) if swap_total else 0,
- 'disk_free' : disk_usage.get("disk_free"),
+ 'swap_in': bytes2kilobytes(swap_stats.sin) if hasattr(swap_stats, 'sin') else 0,
+ 'swap_out': bytes2kilobytes(swap_stats.sout) if hasattr(swap_stats, 'sout') else 0,
# todo: cannot send string
#'part_max_used' : disk_usage.get("max_part_used")[0],
- 'disk_total' : disk_usage.get("disk_total")
}
pass
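The revised accounting subtracts the page cache from 'used' and reports psutil's 'available' figure as free memory, which better reflects what processes can actually allocate. A minimal sketch (the attribute guards mirror the diff, since fields like 'cached' are platform-specific):

import psutil

def mem_snapshot_kb():
    vm = psutil.virtual_memory()
    to_kb = lambda b: b / 1024
    cached = vm.cached if hasattr(vm, 'cached') else 0
    return {
        # Used memory without the page cache the kernel can reclaim.
        'mem_used': to_kb(vm.used - cached),
        # 'available' estimates memory usable without swapping.
        'mem_free': to_kb(vm.available),
        'mem_cached': to_kb(cached),
    }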
@@ -166,14 +165,13 @@ class HostInfo():
# Faster version
def get_combined_disk_usage(self):
- disk_usage = namedtuple('disk_usage', [ 'total', 'used', 'free',
- 'percent', 'part_max_used' ])
combined_disk_total = 0
combined_disk_used = 0
combined_disk_free = 0
combined_disk_percent = 0
max_percent_usage = ('', 0)
+ partition_count = 0
for part in psutil.disk_partitions(all=False):
if os.name == 'nt':
if 'cdrom' in part.opts or part.fstype == '':
@@ -188,18 +186,23 @@ class HostInfo():
combined_disk_total += usage.total if hasattr(usage, 'total') else 0
combined_disk_used += usage.used if hasattr(usage, 'used') else 0
combined_disk_free += usage.free if hasattr(usage, 'free') else 0
- combined_disk_percent += usage.percent if hasattr(usage, 'percent') else 0
+ if hasattr(usage, 'percent'):
+ combined_disk_percent += usage.percent
+ partition_count += 1
if hasattr(usage, 'percent') and max_percent_usage[1] < int(usage.percent):
max_percent_usage = (part.mountpoint, usage.percent)
pass
pass
+ if partition_count > 0:
+ combined_disk_percent /= partition_count
+
return { "disk_total" : bytes2human(combined_disk_total),
"disk_used" : bytes2human(combined_disk_used),
"disk_free" : bytes2human(combined_disk_free),
- "disk_percent" : bytes2human(combined_disk_percent)
- # todo: cannot send string
+ "disk_percent" : combined_disk_percent
+ # todo: cannot send string
#"max_part_used" : max_percent_usage }
}
pass
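The partition_count change turns the summed usage.percent into an average; a raw sum can exceed 100 on multi-disk hosts. A minimal sketch of the corrected computation:

import psutil

def combined_disk_percent():
    total, count = 0.0, 0
    for part in psutil.disk_partitions(all=False):
        try:
            usage = psutil.disk_usage(part.mountpoint)
        except OSError:
            continue  # unreadable mount, e.g. an empty removable drive
        total += usage.percent
        count += 1
    # Average across partitions instead of summing.
    return total / count if count else 0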
@@ -211,49 +214,18 @@ class HostInfo():
swap_stats = psutil.swap_memory()
mem_info = psutil.virtual_memory()
+ # No ability to store strings
return {
'cpu_num' : cpu_count_logical,
- 'cpu_speed' : '',
'swap_total' : swap_stats.total,
'boottime' : boot_time,
- 'machine_type' : platform.processor(),
- 'os_name' : platform.system(),
- 'os_release' : platform.release(),
- 'location' : '',
+ # 'machine_type' : platform.processor(),
+ # 'os_name' : platform.system(),
+ # 'os_release' : platform.release(),
'mem_total' : mem_info.total
}
-
-
- def get_disk_usage(self):
- disk_usage = {}
-
- for part in psutil.disk_partitions(all=False):
- if os.name == 'nt':
- if 'cdrom' in part.opts or part.fstype == '':
- # skip cd-rom drives with no disk in it; they may raise
- # ENOENT, pop-up a Windows GUI error for a non-ready
- # partition or just hang.
- continue
- pass
- pass
- usage = psutil.disk_usage(part.mountpoint)
- disk_usage.update(
- { part.device :
- {
- "total" : bytes2human(usage.total),
- "user" : bytes2human(usage.used),
- "free" : bytes2human(usage.free),
- "percent" : int(usage.percent),
- "fstype" : part.fstype,
- "mount" : part.mountpoint
- }
- }
- )
- pass
- pass
-
- def get_disk_io_counters(self):
+ def get_combined_disk_io_counters(self):
# read_count: number of reads
# write_count: number of writes
# read_bytes: number of bytes read
@@ -287,6 +259,48 @@ class HostInfo():
new_disk_stats['write_bps'] = write_bps
return new_disk_stats
+ def get_disk_io_counters_per_disk(self):
+ # Return a normalized disk name with the counters per disk
+ disk_io_counters = psutil.disk_io_counters(True)
+ per_disk_io_counters = {}
+
+ sortByKey = lambda x: sorted(x.items(), key=operator.itemgetter(0))
+
+ disk_counter = 0
+ if disk_io_counters:
+ # Sort disks lexically, best chance for similar disk topologies getting
+ # aggregated correctly
+ disk_io_counters = sortByKey(disk_io_counters)
+ for item in disk_io_counters:
+ disk_counter += 1
+ disk = item[0]
+ logger.debug('Adding disk counters for %s' % str(disk))
+ sdiskio = item[1]
+ prefix = 'disk_{0}_'.format(disk_counter)
+ counter_dict = {
+ prefix + 'read_count' : sdiskio.read_count if hasattr(sdiskio, 'read_count') else 0,
+ prefix + 'write_count' : sdiskio.write_count if hasattr(sdiskio, 'write_count') else 0,
+ prefix + 'read_bytes' : sdiskio.read_bytes if hasattr(sdiskio, 'read_bytes') else 0,
+ prefix + 'write_bytes' : sdiskio.write_bytes if hasattr(sdiskio, 'write_bytes') else 0,
+ prefix + 'read_time' : sdiskio.read_time if hasattr(sdiskio, 'read_time') else 0,
+ prefix + 'write_time' : sdiskio.write_time if hasattr(sdiskio, 'write_time') else 0
+ }
+ # Optional platform specific attributes
+ if hasattr(sdiskio, 'busy_time'):
+ counter_dict[ prefix + 'busy_time' ] = sdiskio.busy_time
+ if hasattr(sdiskio, 'read_merged_count'):
+ counter_dict[ prefix + 'read_merged_count' ] = sdiskio.read_merged_count
+ if hasattr(sdiskio, 'write_merged_count'):
+ counter_dict[ prefix + 'write_merged_count' ] = sdiskio.write_merged_count
+
+ per_disk_io_counters.update(counter_dict)
+ pass
+ pass
+ # Send total disk count as a metric
+ per_disk_io_counters[ 'disk_num' ] = disk_counter
+
+ return per_disk_io_counters
+
def get_hostname(self):
global cached_hostname
if cached_hostname is not None:
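The new get_disk_io_counters_per_disk() flattens psutil's per-disk counters into disk_<n>_-prefixed metric names after a lexical sort, so hosts with identical disk layouts emit comparable names. A condensed sketch of the naming scheme:

import operator
import psutil

def per_disk_io_metrics():
    counters = psutil.disk_io_counters(perdisk=True)
    metrics = {}
    # Sort by device name so disk_1_* maps to the lexically first disk
    # on every host with the same topology.
    for i, (disk, io) in enumerate(
            sorted(counters.items(), key=operator.itemgetter(0)), 1):
        prefix = 'disk_{0}_'.format(i)
        metrics[prefix + 'read_bytes'] = getattr(io, 'read_bytes', 0)
        metrics[prefix + 'write_bytes'] = getattr(io, 'write_bytes', 0)
    metrics['disk_num'] = len(counters)
    return metrics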
http://git-wip-us.apache.org/repos/asf/ambari/blob/395bca84/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py
index c28fd03..84a4d76 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py
@@ -58,7 +58,8 @@ class MetricsCollector():
elif 'disk' in event.get_group_name():
metrics = self.host_info.get_combined_disk_usage()
- metrics.update(self.host_info.get_disk_io_counters())
+ metrics.update(self.host_info.get_combined_disk_io_counters())
+ metrics.update(self.host_info.get_disk_io_counters_per_disk())
elif 'network' in event.get_group_name():
metrics = self.host_info.get_network_info()
@@ -76,7 +77,8 @@ class MetricsCollector():
metrics.update(self.host_info.get_network_info())
metrics.update(self.host_info.get_mem_info())
metrics.update(self.host_info.get_process_info())
- metrics.update(self.host_info.get_disk_io_counters())
+ metrics.update(self.host_info.get_combined_disk_io_counters())
+ metrics.update(self.host_info.get_disk_io_counters_per_disk())
else:
logger.warn('Unknown metric group.')
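Each helper returns a flat dict of metric name to value, and dict.update() folds them into a single payload; a toy illustration of the merge:

combined = {'read_count': 10, 'write_count': 4}     # totals across disks
per_disk = {'disk_1_read_count': 6, 'disk_2_read_count': 4, 'disk_num': 2}

metrics = {}
metrics.update(combined)
metrics.update(per_disk)
print(sorted(metrics.keys()))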
http://git-wip-us.apache.org/repos/asf/ambari/blob/395bca84/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestHostInfo.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestHostInfo.py b/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestHostInfo.py
index f07b573..d3d3f05 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestHostInfo.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestHostInfo.py
@@ -62,31 +62,30 @@ class TestHostInfo(TestCase):
self.assertAlmostEqual(cpu['cpu_intr'], 0)
self.assertAlmostEqual(cpu['cpu_sintr'], 0)
-
- @patch("psutil.disk_usage")
- @patch("psutil.disk_partitions")
+
@patch("psutil.swap_memory")
@patch("psutil.virtual_memory")
- def testMemInfo(self, vm_mock, sw_mock, dm_mock, du_mock):
+ def testMemInfo(self, vm_mock, sw_mock):
vm = vm_mock.return_value
vm.free = 2312043
vm.shared = 1243
vm.buffers = 23435
vm.cached = 23545
+ vm.available = 2312043
sw = sw_mock.return_value
sw.free = 2341234
hostinfo = HostInfo(MagicMock())
- cpu = hostinfo.get_mem_info()
+ mem = hostinfo.get_mem_info()
- self.assertAlmostEqual(cpu['mem_free'], 2257)
- self.assertAlmostEqual(cpu['mem_shared'], 1)
- self.assertAlmostEqual(cpu['mem_buffered'], 22)
- self.assertAlmostEqual(cpu['mem_cached'], 22)
- self.assertAlmostEqual(cpu['swap_free'], 2286)
+ self.assertAlmostEqual(mem['mem_free'], 2257)
+ self.assertAlmostEqual(mem['mem_shared'], 1)
+ self.assertAlmostEqual(mem['mem_buffered'], 22)
+ self.assertAlmostEqual(mem['mem_cached'], 22)
+ self.assertAlmostEqual(mem['swap_free'], 2286)
@patch("psutil.process_iter")
@@ -129,7 +128,7 @@ class TestHostInfo(TestCase):
self.assertEqual(cdu['disk_total'], "0.00")
self.assertEqual(cdu['disk_used'], "0.00")
self.assertEqual(cdu['disk_free'], "0.00")
- self.assertEqual(cdu['disk_percent'], "0.00")
+ self.assertEqual(cdu['disk_percent'], 0)
@patch("psutil.disk_io_counters")
def testDiskIOCounters(self, io_mock):
@@ -141,7 +140,7 @@ class TestHostInfo(TestCase):
hostinfo = HostInfo(MagicMock())
- disk_counters = hostinfo.get_disk_io_counters()
+ disk_counters = hostinfo.get_combined_disk_io_counters()
self.assertEqual(disk_counters['read_count'], 0)
self.assertEqual(disk_counters['write_count'], 1)
@@ -150,4 +149,58 @@ class TestHostInfo(TestCase):
self.assertEqual(disk_counters['read_time'], 4)
self.assertEqual(disk_counters['write_time'], 5)
+ @patch("psutil.disk_io_counters")
+ def test_get_disk_io_counters_per_disk(self, io_counters_mock):
+ Counters = collections.namedtuple('sdiskio', ['read_count', 'write_count',
+ 'read_bytes', 'write_bytes',
+ 'read_time', 'write_time',
+ 'busy_time', 'read_merged_count',
+ 'write_merged_count'
+ ])
+
+ disk_counters1 = Counters(read_count = 0, write_count = 1,
+ read_bytes = 2, write_bytes = 3,
+ read_time = 4, write_time = 5,
+ busy_time = 6, read_merged_count = 7,
+ write_merged_count = 8
+ )
+
+ disk_counters2 = Counters(read_count = 9, write_count = 10,
+ read_bytes = 11, write_bytes = 12,
+ read_time = 13, write_time = 14,
+ busy_time = 15, read_merged_count = 16,
+ write_merged_count = 17
+ )
+
+ counters_per_disk = { 'sdb1' : disk_counters2, 'sda1' : disk_counters1 }
+ io_counters_mock.return_value = counters_per_disk
+
+ hostinfo = HostInfo(MagicMock())
+
+ disk_counter_per_disk = hostinfo.get_disk_io_counters_per_disk()
+
+ # Assert for sda1
+ self.assertEqual(disk_counter_per_disk['disk_1_read_count'], 0)
+ self.assertEqual(disk_counter_per_disk['disk_1_write_count'], 1)
+ self.assertEqual(disk_counter_per_disk['disk_1_read_bytes'], 2)
+ self.assertEqual(disk_counter_per_disk['disk_1_write_bytes'], 3)
+ self.assertEqual(disk_counter_per_disk['disk_1_read_time'], 4)
+ self.assertEqual(disk_counter_per_disk['disk_1_write_time'], 5)
+ self.assertEqual(disk_counter_per_disk['disk_1_busy_time'], 6)
+ self.assertEqual(disk_counter_per_disk['disk_1_read_merged_count'], 7)
+ self.assertEqual(disk_counter_per_disk['disk_1_write_merged_count'], 8)
+
+ # Assert for sdb1
+
+ self.assertEqual(disk_counter_per_disk['disk_2_read_count'], 9)
+ self.assertEqual(disk_counter_per_disk['disk_2_write_count'], 10)
+ self.assertEqual(disk_counter_per_disk['disk_2_read_bytes'], 11)
+ self.assertEqual(disk_counter_per_disk['disk_2_write_bytes'], 12)
+ self.assertEqual(disk_counter_per_disk['disk_2_read_time'], 13)
+ self.assertEqual(disk_counter_per_disk['disk_2_write_time'], 14)
+ self.assertEqual(disk_counter_per_disk['disk_2_busy_time'], 15)
+ self.assertEqual(disk_counter_per_disk['disk_2_read_merged_count'], 16)
+ self.assertEqual(disk_counter_per_disk['disk_2_write_merged_count'], 17)
+
+
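The new test stubs psutil.disk_io_counters() with a dict of namedtuples, which suffices because the production code only reads attributes off the returned objects. A minimal sketch of the same mocking pattern (assuming the mock package used elsewhere in this suite):

import collections
from mock import patch  # 'unittest.mock' on Python 3

FakeIO = collections.namedtuple('sdiskio', ['read_bytes', 'write_bytes'])

with patch('psutil.disk_io_counters') as io_mock:
    io_mock.return_value = {'sda1': FakeIO(read_bytes=2, write_bytes=3)}
    import psutil
    assert psutil.disk_io_counters(True)['sda1'].read_bytes == 2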
http://git-wip-us.apache.org/repos/asf/ambari/blob/395bca84/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
index e5020a5..13a2c26 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
@@ -283,7 +283,11 @@ def create_ams_dashboards():
if "id" in dashboard_def:
dashboard_def['id'] = None
# Set correct tags
- dashboard_def['tags'] = [ 'builtin', version ]
+ if 'tags' in dashboard_def:
+ dashboard_def['tags'].extend(['builtin', version])
+ else:
+ dashboard_def['tags'] = [ 'builtin', version ]
+
dashboard_def['overwrite'] = True
for dashboard in existing_dashboards:
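One subtlety in the hunk above: list.append() takes exactly one element, so adding two tags to an existing list needs extend(). A toy illustration:

tags = ['hdfs']
tags.extend(['builtin', '2.2'])
# tags.append('builtin', '2.2') would raise
# TypeError: append() takes exactly one argument (2 given)
print(tags)  # ['hdfs', 'builtin', '2.2']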
http://git-wip-us.apache.org/repos/asf/ambari/blob/395bca84/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
index 0b0932a..383a0de 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
@@ -26,7 +26,7 @@ enable_value_threshold = false
send_interval = {{metrics_report_interval}}
[collector]
-collector_sleep_interval = 5
+collector_sleep_interval = 10
max_queue_size = 5000
host = {{metric_collector_host}}
port = {{metric_collector_port}}
http://git-wip-us.apache.org/repos/asf/ambari/blob/395bca84/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
index 47b504f..fcd9b23 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -82,12 +82,11 @@ datanode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collec
namenode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
resourcemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
nodemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-historyserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+jobhistoryserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
journalnode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-nimbus.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-supervisor.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
maptask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
reducetask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+applicationhistoryserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue