Posted to commits@ambari.apache.org by av...@apache.org on 2016/09/14 22:03:47 UTC
[1/5] ambari git commit: AMBARI-18185 : Selecting one host when topN is set, throws an error. (avijayan)
Repository: ambari
Updated Branches:
refs/heads/trunk 1b89d6866 -> f5ad1de86
AMBARI-18185 : Selecting one host when topN is set, throws an error. (avijayan)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4a099342
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4a099342
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4a099342
Branch: refs/heads/trunk
Commit: 4a099342c1b5a8c1b7dbf77fda3ef0f24bfd26cd
Parents: 1b89d68
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Tue Sep 13 20:16:03 2016 -0700
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Wed Sep 14 15:03:33 2016 -0700
----------------------------------------------------------------------
.../metrics/timeline/HBaseTimelineMetricStore.java | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/4a099342/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
index 60ebdcf..a6f8e2f 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -228,7 +228,8 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
.grouped(groupedByHosts);
if (topNConfig != null) {
- if (TopNCondition.isTopNHostCondition(metricNames, hostnames) || TopNCondition.isTopNMetricCondition(metricNames, hostnames)) {
+ if (TopNCondition.isTopNHostCondition(metricNames, hostnames) ^ //Only 1 condition should be true.
+ TopNCondition.isTopNMetricCondition(metricNames, hostnames)) {
conditionBuilder.topN(topNConfig.getTopN());
conditionBuilder.isBottomN(topNConfig.getIsBottomN());
Function.ReadFunction readFunction = Function.ReadFunction.getFunction(topNConfig.getTopNFunction());
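For illustration only (not part of the commit): a small Python sketch of the boolean change above. The patch's in-line comment notes that only one of the two topN conditions should be true, so the plain OR is replaced with an exclusive OR before the topN settings are applied.

def should_apply_topn(is_topn_host_condition, is_topn_metric_condition):
    # "Only 1 condition should be true": XOR is true exactly when one of the
    # two conditions holds, unlike OR, which is also true when both hold.
    return is_topn_host_condition ^ is_topn_metric_condition

for host_cond in (False, True):
    for metric_cond in (False, True):
        print(host_cond, metric_cond, '->', should_apply_topn(host_cond, metric_cond))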
[2/5] ambari git commit: AMBARI-18289 : Invalid negative values for some AMS metrics. (avijayan)
Posted by av...@apache.org.
AMBARI-18289 : Invalid negative values for some AMS metrics. (avijayan)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9455b52d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9455b52d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9455b52d
Branch: refs/heads/trunk
Commit: 9455b52dd3126248ccea0d077d5da4512f4e2f21
Parents: aa0528e
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Tue Sep 13 20:22:56 2016 -0700
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Wed Sep 14 15:03:34 2016 -0700
----------------------------------------------------------------------
.../conf/unix/metric_monitor.ini | 1 +
.../src/main/python/core/config_reader.py | 3 ++
.../src/main/python/core/host_info.py | 39 ++++++++++++++++----
.../0.1.0/configuration/ams-env.xml | 9 +++++
.../0.1.0/package/scripts/params.py | 1 +
.../package/templates/metric_monitor.ini.j2 | 1 +
6 files changed, 46 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/9455b52d/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini
index 59ca5d7..e98c65c 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini
+++ b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini
@@ -21,6 +21,7 @@ debug_level = INFO
metrics_servers = localhost
enable_time_threshold = false
enable_value_threshold = false
+skip_disk_patterns =
[emitter]
send_interval = 60
http://git-wip-us.apache.org/repos/asf/ambari/blob/9455b52d/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
index 44692f6..2e8a170 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
@@ -245,3 +245,6 @@ class Configuration:
def get_ca_certs(self):
return self._ca_cert_file_path
+
+ def get_disk_metrics_skip_pattern(self):
+ return self.get("default", "skip_disk_patterns")
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/9455b52d/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
index 845b270..f79cacd 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
@@ -26,6 +26,8 @@ import time
import threading
import socket
import operator
+import re
+from collections import namedtuple
logger = logging.getLogger()
cached_hostname = None
@@ -244,16 +246,37 @@ class HostInfo():
if delta <= 0:
delta = float("inf")
- io_counters = psutil.disk_io_counters()
+ skip_disk_patterns = self.__config.get_disk_metrics_skip_pattern()
+ logger.debug('skip_disk_patterns: %s' % skip_disk_patterns)
+ if not skip_disk_patterns or skip_disk_patterns == 'None':
+ io_counters = psutil.disk_io_counters()
+ else:
+ sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
+ 'read_bytes', 'write_bytes',
+ 'read_time', 'write_time'])
+ skip_disk_pattern_list = skip_disk_patterns.split(',')
+ rawdict = psutil.disk_io_counters(True)
+ if not rawdict:
+ raise RuntimeError("Couldn't find any physical disk")
+ trimmed_dict = {}
+ for disk, fields in rawdict.items():
+ ignore_disk = False
+ for p in skip_disk_pattern_list:
+ if re.match(p, disk):
+ ignore_disk = True
+ if not ignore_disk:
+ trimmed_dict[disk] = sdiskio(*fields)
+ io_counters = sdiskio(*[sum(x) for x in zip(*trimmed_dict.values())])
new_disk_stats = {
- 'read_count' : io_counters.read_count if hasattr(io_counters, 'read_count') else 0,
- 'write_count' : io_counters.write_count if hasattr(io_counters, 'write_count') else 0,
- 'read_bytes' : io_counters.read_bytes if hasattr(io_counters, 'read_bytes') else 0,
- 'write_bytes' : io_counters.write_bytes if hasattr(io_counters, 'write_bytes') else 0,
- 'read_time' : io_counters.read_time if hasattr(io_counters, 'read_time') else 0,
- 'write_time' : io_counters.write_time if hasattr(io_counters, 'write_time') else 0
- }
+ 'read_count' : io_counters.read_count if hasattr(io_counters, 'read_count') else 0,
+ 'write_count' : io_counters.write_count if hasattr(io_counters, 'write_count') else 0,
+ 'read_bytes' : io_counters.read_bytes if hasattr(io_counters, 'read_bytes') else 0,
+ 'write_bytes' : io_counters.write_bytes if hasattr(io_counters, 'write_bytes') else 0,
+ 'read_time' : io_counters.read_time if hasattr(io_counters, 'read_time') else 0,
+ 'write_time' : io_counters.write_time if hasattr(io_counters, 'write_time') else 0
+ }
+
if not self.__last_disk_data:
self.__last_disk_data = new_disk_stats
read_bps = (new_disk_stats['read_bytes'] - self.__last_disk_data['read_bytes']) / delta
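A self-contained Python sketch of the filtering logic added above, using hard-coded sample counters in place of psutil.disk_io_counters(perdisk=True); the disk names and numbers are made up:

import re
from collections import namedtuple

sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
                                 'read_bytes', 'write_bytes',
                                 'read_time', 'write_time'])

# Hypothetical per-disk counters, keyed by disk name as psutil would return them.
rawdict = {
    'sda1': (10, 20, 1024, 2048, 5, 7),
    'dm-0': (99, 99, 9999, 9999, 99, 99),   # e.g. a docker-managed volume to skip
}

skip_disk_patterns = 'dm-[0-9]+'
skip_disk_pattern_list = skip_disk_patterns.split(',')

trimmed_dict = {}
for disk, fields in rawdict.items():
    if not any(re.match(p, disk) for p in skip_disk_pattern_list):
        trimmed_dict[disk] = sdiskio(*fields)

# Component-wise sum over the remaining disks, the same zip trick as in the patch.
io_counters = sdiskio(*[sum(x) for x in zip(*trimmed_dict.values())])
print(io_counters)   # only sda1 contributes here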
http://git-wip-us.apache.org/repos/asf/ambari/blob/9455b52d/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
index b6ce3a2..a52cb82 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
@@ -92,6 +92,15 @@
<on-ambari-upgrade add="true"/>
</property>
<property>
+ <name>timeline.metrics.skip.disk.metrics.patterns</name>
+ <value>true</value>
+ <description>
+ Comma separated list of disk patterns to be ignored while collecting aggregate disk usage and counter metrics.
+ For example, volume groups managed by docker can be ignored by using the pattern "dm-[0-9]+"
+ </description>
+ <on-ambari-upgrade add="true"/>
+ </property>
+ <property>
<name>content</name>
<display-name>ams-env template</display-name>
<value>
http://git-wip-us.apache.org/repos/asf/ambari/blob/9455b52d/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index 3908b27..61b89f8 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -192,6 +192,7 @@ metrics_collector_heapsize = default('/configurations/ams-env/metrics_collector_
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+skip_disk_metrics_patterns = default("/configurations/ams-env/timeline.metrics.skip.disk.metrics.patterns", None)
hbase_log_dir = config['configurations']['ams-hbase-env']['hbase_log_dir']
hbase_classpath_additional = default("/configurations/ams-hbase-env/hbase_classpath_additional", None)
http://git-wip-us.apache.org/repos/asf/ambari/blob/9455b52d/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
index 65129f8..b011fd5 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
@@ -21,6 +21,7 @@ debug_level = INFO
metrics_servers = {{ams_collector_hosts}}
enable_time_threshold = false
enable_value_threshold = false
+skip_disk_patterns = {{skip_disk_metrics_patterns}}
[emitter]
send_interval = {{metrics_report_interval}}
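For reference, a minimal sketch (assumed file contents, not from the commit) of how the new skip_disk_patterns key could be read back out of a metric_monitor.ini-style file with Python 3's configparser, which is roughly what Configuration.get_disk_metrics_skip_pattern() does through its own config reader:

import configparser

# Hypothetical rendered fragment of metric_monitor.ini; only skip_disk_patterns is new.
sample_ini = """
[default]
debug_level = INFO
metrics_servers = localhost
skip_disk_patterns = dm-[0-9]+
"""

config = configparser.RawConfigParser()
config.read_string(sample_ini)
patterns = config.get("default", "skip_disk_patterns").split(',')
print(patterns)   # ['dm-[0-9]+']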
[5/5] ambari git commit: AMBARI-18154 : Ambari Dashboard, Cluster load widget - Incorrect value in Nodes._avg metric. (avijayan)
Posted by av...@apache.org.
AMBARI-18154 : Ambari Dashboard, Cluster load widget - Incorrect value in Nodes._avg metric. (avijayan)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aa0528ec
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aa0528ec
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aa0528ec
Branch: refs/heads/trunk
Commit: aa0528ecab47850f44dd5575ab7861083e67c6ea
Parents: 4a09934
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Tue Sep 13 20:17:40 2016 -0700
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Wed Sep 14 15:03:34 2016 -0700
----------------------------------------------------------------------
.../TimelineMetricClusterAggregatorSecond.java | 34 ++++++++++++++++++--
...melineMetricClusterAggregatorSecondTest.java | 6 ++--
.../src/main/resources/ganglia_properties.json | 2 +-
3 files changed, 36 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/aa0528ec/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
index 6731eb3..5f40d21 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
@@ -34,11 +34,15 @@ import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_APP_ID;
import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.SERVER_SIDE_TIMESIFT_ADJUSTMENT;
import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_CLUSTER_AGGREGATOR_INTERPOLATION_ENABLED;
import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.GET_METRIC_SQL;
@@ -130,6 +134,7 @@ public class TimelineMetricClusterAggregatorSecond extends AbstractTimelineAggre
throws SQLException, IOException {
Map<TimelineClusterMetric, MetricClusterAggregate> aggregateClusterMetrics =
new HashMap<TimelineClusterMetric, MetricClusterAggregate>();
+ int numLiveHosts = 0;
TimelineMetric metric = null;
if (rs.next()) {
@@ -145,18 +150,25 @@ public class TimelineMetricClusterAggregatorSecond extends AbstractTimelineAggre
metric.addMetricValues(nextMetric.getMetricValues());
} else {
// Process the current metric
- processAggregateClusterMetrics(aggregateClusterMetrics, metric, timeSlices);
+ int numHosts = processAggregateClusterMetrics(aggregateClusterMetrics, metric, timeSlices);
+ numLiveHosts = Math.max(numHosts, numLiveHosts);
metric = nextMetric;
}
}
}
// Process last metric
if (metric != null) {
- processAggregateClusterMetrics(aggregateClusterMetrics, metric, timeSlices);
+ int numHosts = processAggregateClusterMetrics(aggregateClusterMetrics, metric, timeSlices);
+ numLiveHosts = Math.max(numHosts, numLiveHosts);
}
// Add app level aggregates to save
aggregateClusterMetrics.putAll(appAggregator.getAggregateClusterMetrics());
+
+ // Add liveHosts metric.
+ long timestamp = timeSlices.get(timeSlices.size() - 1)[1];
+ processLiveHostsMetric(aggregateClusterMetrics, numLiveHosts, timestamp);
+
return aggregateClusterMetrics;
}
@@ -165,10 +177,11 @@ public class TimelineMetricClusterAggregatorSecond extends AbstractTimelineAggre
* timeline.metrics.cluster.aggregator.minute.timeslice.interval
* Normalize value by averaging them within the interval
*/
- protected void processAggregateClusterMetrics(Map<TimelineClusterMetric, MetricClusterAggregate> aggregateClusterMetrics,
+ protected int processAggregateClusterMetrics(Map<TimelineClusterMetric, MetricClusterAggregate> aggregateClusterMetrics,
TimelineMetric metric, List<Long[]> timeSlices) {
// Create time slices
Map<TimelineClusterMetric, Double> clusterMetrics = sliceFromTimelineMetric(metric, timeSlices);
+ int numHosts = 0;
if (clusterMetrics != null && !clusterMetrics.isEmpty()) {
for (Map.Entry<TimelineClusterMetric, Double> clusterMetricEntry :
@@ -188,10 +201,13 @@ public class TimelineMetricClusterAggregatorSecond extends AbstractTimelineAggre
aggregate.updateMax(avgValue);
aggregate.updateMin(avgValue);
}
+
+ numHosts = aggregate.getNumberOfHosts();
// Update app level aggregates
appAggregator.processTimelineClusterMetric(clusterMetric, metric.getHostName(), avgValue);
}
}
+ return numHosts;
}
protected Map<TimelineClusterMetric, Double> sliceFromTimelineMetric(
@@ -374,4 +390,16 @@ public class TimelineMetricClusterAggregatorSecond extends AbstractTimelineAggre
return -1l;
}
+ private void processLiveHostsMetric(Map<TimelineClusterMetric, MetricClusterAggregate> aggregateClusterMetrics,
+ int numLiveHosts, long timestamp) {
+
+ TimelineClusterMetric timelineClusterMetric = new TimelineClusterMetric(
+ "live_hosts", HOST_APP_ID, null, timestamp, null);
+
+ MetricClusterAggregate metricClusterAggregate = new MetricClusterAggregate((double) numLiveHosts,
+ 1, null, (double) numLiveHosts, (double) numLiveHosts);
+
+ aggregateClusterMetrics.put(timelineClusterMetric, metricClusterAggregate);
+ }
+
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/aa0528ec/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
index 0f93bab..014772f 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
@@ -259,8 +259,9 @@ public class TimelineMetricClusterAggregatorSecondTest {
aggregateClusterMetrics.clear();
timelineMetric.setType("COUNTER");
- secondAggregator.processAggregateClusterMetrics(aggregateClusterMetrics, timelineMetric, timeslices);
+ int liveHosts = secondAggregator.processAggregateClusterMetrics(aggregateClusterMetrics, timelineMetric, timeslices);
+ Assert.assertEquals(liveHosts, 1);
Assert.assertEquals(aggregateClusterMetrics.size(), 4);
timelineClusterMetric.setTimestamp(startTime + 30*seconds);
Assert.assertTrue(aggregateClusterMetrics.containsKey(timelineClusterMetric));
@@ -294,8 +295,9 @@ public class TimelineMetricClusterAggregatorSecondTest {
aggregateClusterMetrics.clear();
timelineMetric.setType("COUNTER");
- secondAggregator.processAggregateClusterMetrics(aggregateClusterMetrics, timelineMetric, timeslices);
+ liveHosts = secondAggregator.processAggregateClusterMetrics(aggregateClusterMetrics, timelineMetric, timeslices);
+ Assert.assertEquals(liveHosts, 1);
Assert.assertEquals(aggregateClusterMetrics.size(), 4);
timelineClusterMetric.setTimestamp(startTime + 60*seconds);
Assert.assertTrue(aggregateClusterMetrics.containsKey(timelineClusterMetric));
http://git-wip-us.apache.org/repos/asf/ambari/blob/aa0528ec/ambari-server/src/main/resources/ganglia_properties.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/ganglia_properties.json b/ambari-server/src/main/resources/ganglia_properties.json
index 5632bc0..bd2a547 100644
--- a/ambari-server/src/main/resources/ganglia_properties.json
+++ b/ambari-server/src/main/resources/ganglia_properties.json
@@ -47,7 +47,7 @@
"metric":"load_report.Nodes",
"pointInTime":false,
"temporal":true,
- "amsId":"cpu_num"
+ "amsId":"live_hosts"
},
"metrics/load/Procs":{
"metric":"load_report.Procs",
[4/5] ambari git commit: AMBARI-18125 : Allow for certain metrics to skip aggregation determined by client. (avijayan)
Posted by av...@apache.org.
AMBARI-18125 : Allow for certain metrics to skip aggregation determined by client. (avijayan)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f5ad1de8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f5ad1de8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f5ad1de8
Branch: refs/heads/trunk
Commit: f5ad1de8689fc45980e5439d873a9331ea6f4723
Parents: 0e34358
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Wed Sep 14 15:03:23 2016 -0700
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Wed Sep 14 15:03:34 2016 -0700
----------------------------------------------------------------------
.../metrics2/sink/timeline/TimelineMetric.java | 11 ++
.../timeline/HadoopTimelineMetricsSink.java | 35 ++++--
.../timeline/HadoopTimelineMetricsSinkTest.java | 4 +-
.../src/main/python/core/host_info.py | 4 +-
.../src/test/python/core/TestHostInfo.py | 43 +++----
.../timeline/TimelineMetricConfiguration.java | 3 +
.../TimelineMetricClusterAggregatorSecond.java | 30 ++++-
.../TimelineMetricMetadataManager.java | 9 +-
.../metrics/timeline/query/Condition.java | 1 +
.../timeline/query/DefaultCondition.java | 84 +++++++++++---
.../metrics/timeline/query/EmptyCondition.java | 6 +
.../query/SplitByMetricNamesCondition.java | 6 +
.../timeline/TestPhoenixTransactSQL.java | 50 ++++----
...melineMetricClusterAggregatorSecondTest.java | 5 +
.../timeline/query/DefaultConditionTest.java | 116 +++++++++++++++++++
.../0.1.0/configuration/ams-site.xml | 12 ++
16 files changed, 340 insertions(+), 79 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
index 98f4978..44c9d4a 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.metrics2.sink.timeline;
+import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
@@ -44,6 +45,7 @@ public class TimelineMetric implements Comparable<TimelineMetric> {
private String type;
private String units;
private TreeMap<Long, Double> metricValues = new TreeMap<Long, Double>();
+ private Map<String, String> metadata = new HashMap<>();
// default
public TimelineMetric() {
@@ -148,6 +150,15 @@ public class TimelineMetric implements Comparable<TimelineMetric> {
this.metricValues.putAll(metricValues);
}
+ @XmlElement(name = "metadata")
+ public Map<String,String> getMetadata () {
+ return metadata;
+ }
+
+ public void setMetadata (Map<String,String> metadata) {
+ this.metadata = metadata;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
index c534121..b720ba9 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
@@ -34,6 +34,7 @@ import java.io.IOException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -228,6 +229,7 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
String contextName = record.context();
StringBuilder sb = new StringBuilder();
+ boolean skipAggregation = false;
// Transform ipc.8020 -> ipc.client, ipc.8040 -> ipc.datanode, etc.
if (contextName.startsWith("ipc.")) {
@@ -239,17 +241,19 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
sb.append(contextName);
sb.append('.');
- // Similar to GangliaContext adding processName to distinguish jvm
- // metrics for co-hosted daemons. We only do this for HBase since the
- // appId is shared for Master and RS.
- if (contextName.equals("jvm")) {
- if (record.tags() != null) {
- for (MetricsTag tag : record.tags()) {
- if (tag.info().name().equalsIgnoreCase("processName") &&
- (tag.value().equals("RegionServer") || tag.value().equals("Master"))) {
- sb.append(tag.value());
- sb.append('.');
- }
+
+ if (record.tags() != null) {
+ for (MetricsTag tag : record.tags()) {
+ if (StringUtils.isNotEmpty(tag.name()) && tag.name().equals("skipAggregation")) {
+ skipAggregation = String.valueOf(true).equals(tag.value());
+ }
+ // Similar to GangliaContext adding processName to distinguish jvm
+ // metrics for co-hosted daemons. We only do this for HBase since the
+ // appId is shared for Master and RS.
+ if (contextName.equals("jvm") && tag.info().name().equalsIgnoreCase("processName") &&
+ (tag.value().equals("RegionServer") || tag.value().equals("Master"))) {
+ sb.append(tag.value());
+ sb.append('.');
}
}
}
@@ -281,6 +285,12 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
Collection<AbstractMetric> metrics = (Collection<AbstractMetric>) record.metrics();
List<TimelineMetric> metricList = new ArrayList<TimelineMetric>();
+ Map<String, String> metadata = null;
+ if (skipAggregation) {
+ metadata = Collections.singletonMap("skipAggregation", "true");
+ }
+
+
long startTime = record.timestamp();
for (AbstractMetric metric : metrics) {
@@ -294,6 +304,9 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
timelineMetric.setStartTime(startTime);
timelineMetric.setType(metric.type() != null ? metric.type().name() : null);
timelineMetric.getMetricValues().put(startTime, value.doubleValue());
+ if (metadata != null) {
+ timelineMetric.setMetadata(metadata);
+ }
// Put intermediate values into the cache until it is time to send
boolean isCounter = MetricType.COUNTER == metric.type();
metricsCache.putTimelineMetric(timelineMetric, isCounter);
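A language-neutral sketch (written in Python, purely illustrative; the real sink works on Hadoop MetricsTag objects) of the tag handling above: a record carrying a skipAggregation=true tag gets a {"skipAggregation": "true"} metadata map attached to every TimelineMetric built from it, which the collector later reads to decide whether cluster aggregation applies.

def build_metadata(tags):
    # tags: list of (name, value) pairs from the metrics record.
    skip = any(name == 'skipAggregation' and value == 'true' for name, value in tags)
    return {'skipAggregation': 'true'} if skip else None

# Hypothetical record tags, similar to what the sink iterates over.
tags = [('processName', 'RegionServer'), ('skipAggregation', 'true')]
metadata = build_metadata(tags)

metric = {'metricname': 'jvm.RegionServer.memHeapUsedM', 'metricvalues': {}}
if metadata is not None:
    metric['metadata'] = metadata
print(metric)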
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
index ea7f72d..f44ca35 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
@@ -362,8 +362,8 @@ public class HadoopTimelineMetricsSinkTest {
}
}, "8040")
);
- expect(record.tags()).andReturn(tags1).times(6);
- expect(record.tags()).andReturn(tags2).times(6);
+ expect(record.tags()).andReturn(tags1).times(12);
+ expect(record.tags()).andReturn(tags2).times(12);
sink.appendPrefix(eq(record), (StringBuilder) anyObject());
expectLastCall().anyTimes().andStubAnswer(new IAnswer<Object>() {
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
index f79cacd..632c86b 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
@@ -248,8 +248,10 @@ class HostInfo():
skip_disk_patterns = self.__config.get_disk_metrics_skip_pattern()
logger.debug('skip_disk_patterns: %s' % skip_disk_patterns)
+ print skip_disk_patterns
if not skip_disk_patterns or skip_disk_patterns == 'None':
io_counters = psutil.disk_io_counters()
+ print io_counters
else:
sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
'read_bytes', 'write_bytes',
@@ -303,7 +305,7 @@ class HostInfo():
disk = item[0]
logger.debug('Adding disk counters for %s' % str(disk))
sdiskio = item[1]
- prefix = 'disk_{0}_'.format(disk_counter)
+ prefix = 'sdisk_{0}_'.format(disk)
counter_dict = {
prefix + 'read_count' : sdiskio.read_count if hasattr(sdiskio, 'read_count') else 0,
prefix + 'write_count' : sdiskio.write_count if hasattr(sdiskio, 'write_count') else 0,
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestHostInfo.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestHostInfo.py b/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestHostInfo.py
index d3d3f05..63a1ae1 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestHostInfo.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestHostInfo.py
@@ -23,6 +23,7 @@ from host_info import HostInfo
import platform
from unittest import TestCase
from mock.mock import patch, MagicMock
+from core.config_reader import Configuration
import collections
logger = logging.getLogger()
@@ -138,7 +139,9 @@ class TestHostInfo(TestCase):
'read_time', 'write_time'])
io_mock.return_value = Counters(0, 1, 2, 3, 4, 5)
- hostinfo = HostInfo(MagicMock())
+ c = MagicMock()
+ c.get_disk_metrics_skip_pattern.return_value = None
+ hostinfo = HostInfo(c)
disk_counters = hostinfo.get_combined_disk_io_counters()
@@ -179,28 +182,28 @@ class TestHostInfo(TestCase):
disk_counter_per_disk = hostinfo.get_disk_io_counters_per_disk()
- # Assert for sda1
- self.assertEqual(disk_counter_per_disk['disk_1_read_count'], 0)
- self.assertEqual(disk_counter_per_disk['disk_1_write_count'], 1)
- self.assertEqual(disk_counter_per_disk['disk_1_read_bytes'], 2)
- self.assertEqual(disk_counter_per_disk['disk_1_write_bytes'], 3)
- self.assertEqual(disk_counter_per_disk['disk_1_read_time'], 4)
- self.assertEqual(disk_counter_per_disk['disk_1_write_time'], 5)
- self.assertEqual(disk_counter_per_disk['disk_1_busy_time'], 6)
- self.assertEqual(disk_counter_per_disk['disk_1_read_merged_count'], 7)
- self.assertEqual(disk_counter_per_disk['disk_1_write_merged_count'], 8)
+ # Assert for sdisk_sda1
+ self.assertEqual(disk_counter_per_disk['sdisk_sda1_read_count'], 0)
+ self.assertEqual(disk_counter_per_disk['sdisk_sda1_write_count'], 1)
+ self.assertEqual(disk_counter_per_disk['sdisk_sda1_read_bytes'], 2)
+ self.assertEqual(disk_counter_per_disk['sdisk_sda1_write_bytes'], 3)
+ self.assertEqual(disk_counter_per_disk['sdisk_sda1_read_time'], 4)
+ self.assertEqual(disk_counter_per_disk['sdisk_sda1_write_time'], 5)
+ self.assertEqual(disk_counter_per_disk['sdisk_sda1_busy_time'], 6)
+ self.assertEqual(disk_counter_per_disk['sdisk_sda1_read_merged_count'], 7)
+ self.assertEqual(disk_counter_per_disk['sdisk_sda1_write_merged_count'], 8)
# Assert for sdb1
- self.assertEqual(disk_counter_per_disk['disk_2_read_count'], 9)
- self.assertEqual(disk_counter_per_disk['disk_2_write_count'], 10)
- self.assertEqual(disk_counter_per_disk['disk_2_read_bytes'], 11)
- self.assertEqual(disk_counter_per_disk['disk_2_write_bytes'], 12)
- self.assertEqual(disk_counter_per_disk['disk_2_read_time'], 13)
- self.assertEqual(disk_counter_per_disk['disk_2_write_time'], 14)
- self.assertEqual(disk_counter_per_disk['disk_2_busy_time'], 15)
- self.assertEqual(disk_counter_per_disk['disk_2_read_merged_count'], 16)
- self.assertEqual(disk_counter_per_disk['disk_2_write_merged_count'], 17)
+ self.assertEqual(disk_counter_per_disk['sdisk_sdb1_read_count'], 9)
+ self.assertEqual(disk_counter_per_disk['sdisk_sdb1_write_count'], 10)
+ self.assertEqual(disk_counter_per_disk['sdisk_sdb1_read_bytes'], 11)
+ self.assertEqual(disk_counter_per_disk['sdisk_sdb1_write_bytes'], 12)
+ self.assertEqual(disk_counter_per_disk['sdisk_sdb1_read_time'], 13)
+ self.assertEqual(disk_counter_per_disk['sdisk_sdb1_write_time'], 14)
+ self.assertEqual(disk_counter_per_disk['sdisk_sdb1_busy_time'], 15)
+ self.assertEqual(disk_counter_per_disk['sdisk_sdb1_read_merged_count'], 16)
+ self.assertEqual(disk_counter_per_disk['sdisk_sdb1_write_merged_count'], 17)
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
index 1da68ba..22710b0 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
@@ -255,6 +255,9 @@ public class TimelineMetricConfiguration {
public static final String DEFAULT_TOPN_HOSTS_LIMIT =
"timeline.metrics.default.topn.hosts.limit";
+ public static final String TIMELINE_METRIC_AGGREGATION_SQL_FILTERS =
+ "timeline.metrics.cluster.aggregation.sql.filters";
+
public static final String HOST_APP_ID = "HOST";
public static final String DEFAULT_INSTANCE_PORT = "12001";
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
index 5f40d21..6bc0f18 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
@@ -23,9 +23,11 @@ import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.sink.timeline.PostProcessingUtil;
import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.AggregationTaskRunner.AGGREGATOR_NAME;
import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.MetricCollectorHAController;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataKey;
import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataManager;
import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultCondition;
@@ -34,6 +36,7 @@ import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
@@ -47,6 +50,7 @@ import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.ti
import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_CLUSTER_AGGREGATOR_INTERPOLATION_ENABLED;
import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.GET_METRIC_SQL;
import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRIC_AGGREGATION_SQL_FILTERS;
/**
* Aggregates a metric across all hosts in the cluster. Reads metrics from
@@ -60,6 +64,8 @@ public class TimelineMetricClusterAggregatorSecond extends AbstractTimelineAggre
// 1 minute client side buffering adjustment
private final Long serverTimeShiftAdjustment;
private final boolean interpolationEnabled;
+ private TimelineMetricMetadataManager metadataManagerInstance;
+ private String skipAggrPatternStrings;
public TimelineMetricClusterAggregatorSecond(AGGREGATOR_NAME aggregatorName,
TimelineMetricMetadataManager metadataManager,
@@ -78,10 +84,12 @@ public class TimelineMetricClusterAggregatorSecond extends AbstractTimelineAggre
sleepIntervalMillis, checkpointCutOffMultiplier, aggregatorDisabledParam,
tableName, outputTableName, nativeTimeRangeDelay, haController);
+ this.metadataManagerInstance = metadataManager;
appAggregator = new TimelineMetricAppAggregator(metadataManager, metricsConf);
this.timeSliceIntervalMillis = timeSliceInterval;
this.serverTimeShiftAdjustment = Long.parseLong(metricsConf.get(SERVER_SIDE_TIMESIFT_ADJUSTMENT, "90000"));
this.interpolationEnabled = Boolean.parseBoolean(metricsConf.get(TIMELINE_METRICS_CLUSTER_AGGREGATOR_INTERPOLATION_ENABLED, "true"));
+ this.skipAggrPatternStrings = metricsConf.get(TIMELINE_METRIC_AGGREGATION_SQL_FILTERS);
}
@Override
@@ -103,8 +111,19 @@ public class TimelineMetricClusterAggregatorSecond extends AbstractTimelineAggre
@Override
protected Condition prepareMetricQueryCondition(long startTime, long endTime) {
- Condition condition = new DefaultCondition(null, null, null, null, startTime - serverTimeShiftAdjustment,
+
+ List<String> metricNames = new ArrayList<>();
+ boolean metricNamesNotCondition = false;
+
+ if (!StringUtils.isEmpty(skipAggrPatternStrings)) {
+ LOG.info("Skipping aggregation for metric patterns : " + skipAggrPatternStrings);
+ metricNames.addAll(Arrays.asList(skipAggrPatternStrings.split(",")));
+ metricNamesNotCondition = true;
+ }
+
+ Condition condition = new DefaultCondition(metricNames, null, null, null, startTime - serverTimeShiftAdjustment,
endTime, null, null, true);
+ condition.setMetricNamesNotCondition(metricNamesNotCondition);
condition.setNoLimit();
condition.setFetchSize(resultsetFetchSize);
condition.setStatement(String.format(GET_METRIC_SQL,
@@ -180,6 +199,15 @@ public class TimelineMetricClusterAggregatorSecond extends AbstractTimelineAggre
protected int processAggregateClusterMetrics(Map<TimelineClusterMetric, MetricClusterAggregate> aggregateClusterMetrics,
TimelineMetric metric, List<Long[]> timeSlices) {
// Create time slices
+
+ TimelineMetricMetadataKey appKey = new TimelineMetricMetadataKey(metric.getMetricName(), metric.getAppId());
+ TimelineMetricMetadata metricMetadata = metadataManagerInstance.getMetadataCacheValue(appKey);
+
+ if (metricMetadata != null && !metricMetadata.isSupportsAggregates()) {
+ LOG.debug("Skipping cluster aggregation for " + metric.getMetricName());
+ return 0;
+ }
+
Map<TimelineClusterMetric, Double> clusterMetrics = sliceFromTimelineMetric(metric, timeSlices);
int numHosts = 0;
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java
index fd471fb..d0d1dbf 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -202,7 +204,7 @@ public class TimelineMetricMetadataManager {
timelineMetric.getUnits(),
timelineMetric.getType(),
timelineMetric.getStartTime(),
- true
+ supportAggregates(timelineMetric)
);
}
@@ -229,4 +231,9 @@ public class TimelineMetricMetadataManager {
Map<String, Set<String>> getHostedAppsFromStore() throws SQLException {
return hBaseAccessor.getHostedAppsMetadata();
}
+
+ private boolean supportAggregates(TimelineMetric metric) {
+ return MapUtils.isEmpty(metric.getMetadata()) ||
+ !(String.valueOf(true).equals(metric.getMetadata().get("skipAggregation")));
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java
index 4873c24..9aa64bd 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java
@@ -44,4 +44,5 @@ public interface Condition {
void addOrderByColumn(String column);
void setNoLimit();
boolean doUpdate();
+ void setMetricNamesNotCondition(boolean metricNamesNotCondition);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultCondition.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultCondition.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultCondition.java
index 0851e8f..a4f7014 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultCondition.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultCondition.java
@@ -16,12 +16,14 @@
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query;
+import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.sink.timeline.TopNConfig;
import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
import org.apache.hadoop.metrics2.sink.timeline.Precision;
+import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
@@ -40,6 +42,7 @@ public class DefaultCondition implements Condition {
Integer fetchSize;
String statement;
Set<String> orderByColumns = new LinkedHashSet<String>();
+ boolean metricNamesNotCondition = false;
private static final Log LOG = LogFactory.getLog(DefaultCondition.class);
@@ -215,44 +218,85 @@ public class DefaultCondition implements Condition {
protected boolean appendMetricNameClause(StringBuilder sb) {
boolean appendConjunction = false;
- StringBuilder metricsLike = new StringBuilder();
- StringBuilder metricsIn = new StringBuilder();
+ List<String> metricsLike = new ArrayList<>();
+ List<String> metricsIn = new ArrayList<>();
if (getMetricNames() != null) {
for (String name : getMetricNames()) {
if (name.contains("%")) {
- if (metricsLike.length() > 1) {
- metricsLike.append(" OR ");
- }
- metricsLike.append("METRIC_NAME LIKE ?");
+ metricsLike.add(name);
} else {
- if (metricsIn.length() > 0) {
- metricsIn.append(", ");
- }
- metricsIn.append("?");
+ metricsIn.add(name);
}
}
- if (metricsIn.length() > 0) {
- sb.append("(METRIC_NAME IN (");
- sb.append(metricsIn);
+ // Put a '(' first
+ sb.append("(");
+
+ //IN clause
+ // METRIC_NAME (NOT) IN (?,?,?,?)
+ if (CollectionUtils.isNotEmpty(metricsIn)) {
+ sb.append("METRIC_NAME");
+ if (metricNamesNotCondition) {
+ sb.append(" NOT");
+ }
+ sb.append(" IN (");
+ //Append ?,?,?,?
+ for (int i = 0; i < metricsIn.size(); i++) {
+ sb.append("?");
+ if (i < metricsIn.size() - 1) {
+ sb.append(", ");
+ }
+ }
sb.append(")");
appendConjunction = true;
}
- if (metricsLike.length() > 0) {
- if (appendConjunction) {
- sb.append(" OR ");
+ //Put an OR/AND if both types are present
+ if (CollectionUtils.isNotEmpty(metricsIn) &&
+ CollectionUtils.isNotEmpty(metricsLike)) {
+ if (metricNamesNotCondition) {
+ sb.append(" AND ");
} else {
- sb.append("(");
+ sb.append(" OR ");
+ }
+ }
+
+ //LIKE clause
+ // METRIC_NAME (NOT) LIKE ? OR(AND) METRIC_NAME LIKE ?
+ if (CollectionUtils.isNotEmpty(metricsLike)) {
+
+ for (int i = 0; i < metricsLike.size(); i++) {
+ sb.append("METRIC_NAME");
+ if (metricNamesNotCondition) {
+ sb.append(" NOT");
+ }
+ sb.append(" LIKE ");
+ sb.append("?");
+
+ if (i < metricsLike.size() - 1) {
+ if (metricNamesNotCondition) {
+ sb.append(" AND ");
+ } else {
+ sb.append(" OR ");
+ }
+ }
}
- sb.append(metricsLike);
appendConjunction = true;
}
+ // Finish with a ')'
if (appendConjunction) {
sb.append(")");
}
+
+ metricNames.clear();
+ if (CollectionUtils.isNotEmpty(metricsIn)) {
+ metricNames.addAll(metricsIn);
+ }
+ if (CollectionUtils.isNotEmpty(metricsLike)) {
+ metricNames.addAll(metricsLike);
+ }
}
return appendConjunction;
}
@@ -333,4 +377,8 @@ public class DefaultCondition implements Condition {
}
return false;
}
+
+ public void setMetricNamesNotCondition(boolean metricNamesNotCondition) {
+ this.metricNamesNotCondition = metricNamesNotCondition;
+ }
}
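To make the generated SQL shape concrete, here is a small Python sketch of the rewritten clause building above (same split of names into an IN list and LIKE predicates, same NOT handling and AND/OR choice, same parameter reordering); the metric names in the usage line are made up. This is the mechanism that lets the aggregator exclude the timeline.metrics.cluster.aggregation.sql.filters patterns through a NOT IN / NOT LIKE condition.

def metric_name_clause(metric_names, not_condition=False):
    # Names containing '%' become LIKE predicates, the rest share one IN list.
    likes = [n for n in metric_names if '%' in n]
    ins = [n for n in metric_names if '%' not in n]
    negate = 'NOT ' if not_condition else ''
    joiner = ' AND ' if not_condition else ' OR '
    pieces = []
    if ins:
        pieces.append('METRIC_NAME %sIN (%s)' % (negate, ', '.join(['?'] * len(ins))))
    if likes:
        pieces.append(joiner.join('METRIC_NAME %sLIKE ?' % negate for _ in likes))
    clause = '(' + joiner.join(pieces) + ')' if pieces else ''
    # Bind order: IN names first, then LIKE names, matching how the patch
    # reorders metricNames so the parameters line up with the clause.
    return clause, ins + likes

print(metric_name_clause(['cpu_user', 'mem_free', 'regionserver.%'], not_condition=True))
# -> ('(METRIC_NAME NOT IN (?, ?) AND METRIC_NAME NOT LIKE ?)',
#     ['cpu_user', 'mem_free', 'regionserver.%'])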
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/EmptyCondition.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/EmptyCondition.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/EmptyCondition.java
index 34174e2..43ab88c 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/EmptyCondition.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/EmptyCondition.java
@@ -27,6 +27,7 @@ import java.util.List;
public class EmptyCondition implements Condition {
String statement;
boolean doUpdate = false;
+ boolean metricNamesNotCondition = false;
@Override
public boolean isEmpty() {
@@ -144,4 +145,9 @@ public class EmptyCondition implements Condition {
" doUpdate = " + this.doUpdate() +
" }";
}
+
+ @Override
+ public void setMetricNamesNotCondition(boolean metricNamesNotCondition) {
+ this.metricNamesNotCondition = metricNamesNotCondition;
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/SplitByMetricNamesCondition.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/SplitByMetricNamesCondition.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/SplitByMetricNamesCondition.java
index b8ca599..bb4dced 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/SplitByMetricNamesCondition.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/SplitByMetricNamesCondition.java
@@ -25,6 +25,7 @@ import java.util.List;
public class SplitByMetricNamesCondition implements Condition {
private final Condition adaptee;
private String currentMetric;
+ private boolean metricNamesNotCondition = false;
public SplitByMetricNamesCondition(Condition condition){
this.adaptee = condition;
@@ -180,4 +181,9 @@ public class SplitByMetricNamesCondition implements Condition {
public void setCurrentMetric(String currentMetric) {
this.currentMetric = currentMetric;
}
+
+ @Override
+ public void setMetricNamesNotCondition(boolean metricNamesNotCondition) {
+ this.metricNamesNotCondition = metricNamesNotCondition;
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java
index a95655d..e988a61 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java
@@ -47,7 +47,7 @@ public class TestPhoenixTransactSQL {
@Test
public void testConditionClause() throws Exception {
Condition condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", 1407959718L, 1407959918L, null, null, false);
String preparedClause = condition.getConditionClause().toString();
@@ -78,7 +78,7 @@ public class TestPhoenixTransactSQL {
@Test
public void testLikeConditionClause() throws Exception {
Condition condition = new DefaultCondition(
- Arrays.asList("cpu_user", "some=%.metric"),
+ new ArrayList<>(Arrays.asList("cpu_user", "some=%.metric")),
Collections.singletonList("h1"), "a1", "i1", 1407959718L, 1407959918L,
null, null, false);
@@ -115,7 +115,7 @@ public class TestPhoenixTransactSQL {
condition = new DefaultCondition(
- Arrays.asList("some=%.metric"), Collections.singletonList("h1"), "a1", "i1",
+ new ArrayList<>(Arrays.asList("some=%.metric")), Collections.singletonList("h1"), "a1", "i1",
1407959718L, 1407959918L, null, null, false);
preparedClause = condition.getConditionClause().toString();
@@ -127,7 +127,7 @@ public class TestPhoenixTransactSQL {
condition = new DefaultCondition(
- Arrays.asList("some=%.metric1", "some=%.metric2", "some=%.metric3"),
+ new ArrayList<>(Arrays.asList("some=%.metric1", "some=%.metric2", "some=%.metric3")),
Collections.singletonList("h1"), "a1", "i1",
1407959718L, 1407959918L, null, null, false);
@@ -142,7 +142,7 @@ public class TestPhoenixTransactSQL {
@Test
public void testPrepareGetAggregatePrecisionMINUTES() throws SQLException {
Condition condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", 1407959718L, 1407959918L, Precision.MINUTES, null, false);
Connection connection = createNiceMock(Connection.class);
PreparedStatement preparedStatement = createNiceMock(PreparedStatement.class);
@@ -163,7 +163,7 @@ public class TestPhoenixTransactSQL {
Long startTime = 1407959718L;
//SECONDS precision
Condition condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", startTime, endTime, null, null, false);
Connection connection = createNiceMock(Connection.class);
PreparedStatement preparedStatement = createNiceMock(PreparedStatement.class);
@@ -181,7 +181,7 @@ public class TestPhoenixTransactSQL {
// MINUTES precision
startTime = endTime-PhoenixTransactSQL.DAY/1000;
condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", startTime, endTime, null, null, false);
connection = createNiceMock(Connection.class);
preparedStatement = createNiceMock(PreparedStatement.class);
@@ -199,7 +199,7 @@ public class TestPhoenixTransactSQL {
// HOURS precision
startTime = endTime-PhoenixTransactSQL.DAY*30/1000;
condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", startTime, endTime, null, null, false);
connection = createNiceMock(Connection.class);
preparedStatement = createNiceMock(PreparedStatement.class);
@@ -217,7 +217,7 @@ public class TestPhoenixTransactSQL {
// DAYS precision
startTime = endTime-PhoenixTransactSQL.DAY*30*2/1000;
condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", startTime, endTime, null, null, false);
connection = createNiceMock(Connection.class);
preparedStatement = createNiceMock(PreparedStatement.class);
@@ -236,7 +236,7 @@ public class TestPhoenixTransactSQL {
@Test
public void testPrepareGetAggregatePrecisionHours() throws SQLException {
Condition condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", 1407959718L, 1407959918L, Precision.HOURS, null, false);
Connection connection = createNiceMock(Connection.class);
PreparedStatement preparedStatement = createNiceMock(PreparedStatement.class);
@@ -254,7 +254,7 @@ public class TestPhoenixTransactSQL {
@Test
public void testPrepareGetMetricsPrecisionMinutes() throws SQLException {
Condition condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", 1407959718L, 1407959918L, Precision.MINUTES, null, false);
Connection connection = createNiceMock(Connection.class);
PreparedStatement preparedStatement = createNiceMock(PreparedStatement.class);
@@ -275,7 +275,7 @@ public class TestPhoenixTransactSQL {
Long startTime = endTime - 200;
// SECONDS precision
Condition condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", startTime, endTime, null, null, false);
Connection connection = createNiceMock(Connection.class);
PreparedStatement preparedStatement = createNiceMock(PreparedStatement.class);
@@ -294,7 +294,7 @@ public class TestPhoenixTransactSQL {
// SECONDS precision
startTime = endTime-PhoenixTransactSQL.HOUR*2/1000;
condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", startTime, endTime, null, null, false);
connection = createNiceMock(Connection.class);
preparedStatement = createNiceMock(PreparedStatement.class);
@@ -311,7 +311,7 @@ public class TestPhoenixTransactSQL {
// MINUTES precision
startTime = endTime-PhoenixTransactSQL.DAY/1000;
condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", startTime, endTime, null, null, false);
connection = createNiceMock(Connection.class);
preparedStatement = createNiceMock(PreparedStatement.class);
@@ -328,7 +328,7 @@ public class TestPhoenixTransactSQL {
// HOURS precision
startTime = endTime-PhoenixTransactSQL.DAY*30/1000;
condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", startTime, endTime, null, null, false);
connection = createNiceMock(Connection.class);
preparedStatement = createNiceMock(PreparedStatement.class);
@@ -345,7 +345,7 @@ public class TestPhoenixTransactSQL {
// DAYS precision
startTime = endTime-PhoenixTransactSQL.DAY*30*2/1000;
condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", startTime, endTime, null, null, false);
connection = createNiceMock(Connection.class);
preparedStatement = createNiceMock(PreparedStatement.class);
@@ -364,7 +364,7 @@ public class TestPhoenixTransactSQL {
@Test
public void testPrepareGetLatestMetricSqlStmtMultipleHostNames() throws SQLException {
Condition condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Arrays.asList("h1", "h2"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Arrays.asList("h1", "h2"),
"a1", "i1", null, null, null, null, false);
Connection connection = createNiceMock(Connection.class);
PreparedStatement preparedStatement = createNiceMock(PreparedStatement.class);
@@ -390,7 +390,7 @@ public class TestPhoenixTransactSQL {
public void testPrepareGetLatestMetricSqlStmtSortMergeJoinAlgorithm()
throws SQLException {
Condition condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Arrays.asList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Arrays.asList("h1"),
"a1", "i1", null, null, null, null, false);
Connection connection = createNiceMock(Connection.class);
PreparedStatement preparedStatement = createNiceMock(PreparedStatement.class);
@@ -413,7 +413,7 @@ public class TestPhoenixTransactSQL {
@Test
public void testPrepareGetMetricsPrecisionHours() throws SQLException {
Condition condition = new DefaultCondition(
- Arrays.asList("cpu_user", "mem_free"), Collections.singletonList("h1"),
+ new ArrayList<>(Arrays.asList("cpu_user", "mem_free")), Collections.singletonList("h1"),
"a1", "i1", 1407959718L, 1407959918L, Precision.HOURS, null, false);
Connection connection = createNiceMock(Connection.class);
PreparedStatement preparedStatement = createNiceMock(PreparedStatement.class);
@@ -561,7 +561,7 @@ public class TestPhoenixTransactSQL {
List<String> hosts = Arrays.asList("h1", "h2", "h3", "h4");
Condition condition = new TopNCondition(
- Arrays.asList("cpu_user"), hosts,
+ new ArrayList<>(Collections.singletonList("cpu_user")), hosts,
"a1", "i1", 1407959718L, 1407959918L, null, null, false, 2, null, false);
String conditionClause = condition.getConditionClause().toString();
@@ -580,7 +580,7 @@ public class TestPhoenixTransactSQL {
@Test
public void testTopNMetricsConditionClause() throws Exception {
- List<String> metricNames = Arrays.asList("m1", "m2", "m3");
+ List<String> metricNames = new ArrayList<>(Arrays.asList("m1", "m2", "m3"));
Condition condition = new TopNCondition(
metricNames, Collections.singletonList("h1"),
@@ -602,7 +602,7 @@ public class TestPhoenixTransactSQL {
@Test
public void testTopNMetricsIllegalConditionClause() throws Exception {
- List<String> metricNames = Arrays.asList("m1", "m2");
+ List<String> metricNames = new ArrayList<>(Arrays.asList("m1", "m2"));
List<String> hosts = Arrays.asList("h1", "h2");
@@ -616,7 +616,7 @@ public class TestPhoenixTransactSQL {
@Test
public void testHostsRegexpConditionClause() {
Condition condition = new TopNCondition(
- Arrays.asList("m1"), Arrays.asList("%.ambari", "host1.apache"),
+ new ArrayList<>(Arrays.asList("m1")), Arrays.asList("%.ambari", "host1.apache"),
"a1", "i1", 1407959718L, 1407959918L, null, null, false, 2, null, false);
String conditionClause = condition.getConditionClause().toString();
@@ -629,7 +629,7 @@ public class TestPhoenixTransactSQL {
Assert.assertEquals(expectedClause, conditionClause);
condition = new TopNCondition(
- Arrays.asList("m1"), Arrays.asList("%.ambari"),
+ new ArrayList<>(Arrays.asList("m1")), Arrays.asList("%.ambari"),
"a1", "i1", 1407959718L, 1407959918L, null, null, false, 2, null, false);
conditionClause = condition.getConditionClause().toString();
@@ -643,7 +643,7 @@ public class TestPhoenixTransactSQL {
Assert.assertEquals(expectedClause, conditionClause);
condition = new TopNCondition(
- Arrays.asList("m1", "m2", "m3"), Arrays.asList("h1.ambari"),
+ new ArrayList<>(Arrays.asList("m1", "m2", "m3")), Arrays.asList("h1.ambari"),
"a1", "i1", 1407959718L, 1407959918L, null, null, false, 2, null, false);
conditionClause = condition.getConditionClause().toString();
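
Note on the recurring test change above: each Arrays.asList(...) of metric names is wrapped in new ArrayList<>(...). Arrays.asList returns a fixed-size list backed by the original array, and the condition classes now appear to mutate the metric-name list while building the clause (the new DefaultConditionTest below asserts that entries get reordered), so the tests presumably need a resizable copy. A minimal standalone illustration of the difference (class and variable names here are illustrative only):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class FixedSizeListDemo {
  public static void main(String[] args) {
    // Arrays.asList gives a fixed-size view backed by the underlying array.
    List<String> fixed = Arrays.asList("cpu_user", "mem_free");
    try {
      fixed.remove("cpu_user");
    } catch (UnsupportedOperationException e) {
      System.out.println("fixed-size list: remove() not supported");
    }

    // Wrapping in ArrayList produces an independent, resizable copy.
    List<String> mutable = new ArrayList<>(Arrays.asList("cpu_user", "mem_free"));
    mutable.remove("cpu_user");
    System.out.println(mutable); // [mem_free]
  }
}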
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
index 014772f..97b6258 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataKey;
import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataManager;
import org.easymock.EasyMock;
import org.junit.Test;
@@ -115,6 +116,10 @@ public class TimelineMetricClusterAggregatorSecondTest {
Configuration configuration = new Configuration();
TimelineMetricMetadataManager metricMetadataManagerMock = EasyMock.createNiceMock(TimelineMetricMetadataManager.class);
+ EasyMock.expect(metricMetadataManagerMock.getMetadataCacheValue((TimelineMetricMetadataKey)EasyMock.anyObject()))
+ .andReturn(null).anyTimes();
+ EasyMock.replay(metricMetadataManagerMock);
+
TimelineMetricClusterAggregatorSecond secondAggregator = new TimelineMetricClusterAggregatorSecond(
METRIC_AGGREGATE_SECOND, metricMetadataManagerMock, null, configuration, null,
aggregatorInterval, 2, "false", "", "", aggregatorInterval, sliceInterval, null
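
The extra EasyMock stubbing above makes the nice mock answer null for any metadata-cache lookup and switches it to replay mode before the aggregator uses it. A self-contained sketch of that record/replay pattern, with a hypothetical stand-in interface instead of TimelineMetricMetadataManager:

import org.easymock.EasyMock;

// Hypothetical interface standing in for TimelineMetricMetadataManager.
interface MetadataLookup {
  Object getMetadataCacheValue(Object key);
}

public class NiceMockReplaySketch {
  public static void main(String[] args) {
    MetadataLookup lookup = EasyMock.createNiceMock(MetadataLookup.class);
    // Record: any key returns null, any number of times.
    EasyMock.expect(lookup.getMetadataCacheValue(EasyMock.anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(lookup); // switch from record mode to replay mode

    System.out.println(lookup.getMetadataCacheValue("key")); // null
  }
}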
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultConditionTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultConditionTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultConditionTest.java
new file mode 100644
index 0000000..e4e9225
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultConditionTest.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query;
+
+import junit.framework.Assert;
+import org.apache.commons.collections.CollectionUtils;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class DefaultConditionTest {
+
+ @Test
+ public void testMetricNameWhereCondition() {
+ List<String> metricNames = new ArrayList<>();
+
+ //Only IN clause.
+
+ metricNames.add("M1");
+ DefaultCondition condition = new DefaultCondition(metricNames,null,null,null,null,null,null,null,true);
+ StringBuilder sb = new StringBuilder();
+ condition.appendMetricNameClause(sb);
+ Assert.assertEquals(sb.toString(), "(METRIC_NAME IN (?))");
+ Assert.assertTrue(CollectionUtils.isEqualCollection(metricNames, condition.getMetricNames()));
+
+ metricNames.add("m2");
+ condition = new DefaultCondition(metricNames,null,null,null,null,null,null,null,true);
+ sb = new StringBuilder();
+ condition.appendMetricNameClause(sb);
+ Assert.assertEquals(sb.toString(), "(METRIC_NAME IN (?, ?))");
+ Assert.assertTrue(CollectionUtils.isEqualCollection(metricNames, condition.getMetricNames()));
+
+ // Only NOT IN clause
+ condition = new DefaultCondition(metricNames,null,null,null,null,null,null,null,true);
+ condition.setMetricNamesNotCondition(true);
+ sb = new StringBuilder();
+ condition.appendMetricNameClause(sb);
+ Assert.assertEquals(sb.toString(), "(METRIC_NAME NOT IN (?, ?))");
+ Assert.assertTrue(CollectionUtils.isEqualCollection(metricNames, condition.getMetricNames()));
+
+ metricNames.clear();
+
+ //Only LIKE clause
+ metricNames.add("disk%");
+ condition = new DefaultCondition(metricNames,null,null,null,null,null,null,null,true);
+ sb = new StringBuilder();
+ condition.appendMetricNameClause(sb);
+ Assert.assertEquals(sb.toString(), "(METRIC_NAME LIKE ?)");
+ Assert.assertTrue(CollectionUtils.isEqualCollection(metricNames, condition.getMetricNames()));
+
+ metricNames.add("cpu%");
+ condition = new DefaultCondition(metricNames,null,null,null,null,null,null,null,true);
+ sb = new StringBuilder();
+ condition.appendMetricNameClause(sb);
+ Assert.assertEquals(sb.toString(), "(METRIC_NAME LIKE ? OR METRIC_NAME LIKE ?)");
+ Assert.assertTrue(CollectionUtils.isEqualCollection(metricNames, condition.getMetricNames()));
+
+ //Only NOT LIKE clause
+ condition = new DefaultCondition(metricNames,null,null,null,null,null,null,null,true);
+ condition.setMetricNamesNotCondition(true);
+ sb = new StringBuilder();
+ condition.appendMetricNameClause(sb);
+ Assert.assertEquals(sb.toString(), "(METRIC_NAME NOT LIKE ? AND METRIC_NAME NOT LIKE ?)");
+ Assert.assertTrue(CollectionUtils.isEqualCollection(metricNames, condition.getMetricNames()));
+
+ metricNames.clear();
+
+ // IN followed by LIKE clause
+ metricNames.add("M1");
+ metricNames.add("disk%");
+ metricNames.add("M2");
+ condition = new DefaultCondition(metricNames,null,null,null,null,null,null,null,true);
+ sb = new StringBuilder();
+ condition.appendMetricNameClause(sb);
+ Assert.assertEquals(sb.toString(), "(METRIC_NAME IN (?, ?) OR METRIC_NAME LIKE ?)");
+ Assert.assertEquals(metricNames.get(2), "disk%");
+
+ metricNames.clear();
+ //NOT IN followed by NOT LIKE clause
+ metricNames.add("disk%");
+ metricNames.add("metric1");
+ metricNames.add("cpu%");
+ condition = new DefaultCondition(metricNames,null,null,null,null,null,null,null,true);
+ sb = new StringBuilder();
+ condition.setMetricNamesNotCondition(true);
+ condition.appendMetricNameClause(sb);
+ Assert.assertEquals(sb.toString(), "(METRIC_NAME NOT IN (?) AND METRIC_NAME NOT LIKE ? AND METRIC_NAME NOT LIKE ?)");
+ Assert.assertEquals(metricNames.get(0), "metric1");
+
+ //Empty
+ metricNames.clear();
+ condition = new DefaultCondition(metricNames,null,null,null,null,null,null,null,true);
+ sb = new StringBuilder();
+ condition.appendMetricNameClause(sb);
+ Assert.assertEquals(sb.toString(), "");
+
+ }
+}
+
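
The new test pins down how the metric-name clause is assembled: exact names become a single IN (or NOT IN) group, '%' patterns become LIKE (or NOT LIKE) terms joined with OR (or AND when negated), and an empty list yields an empty clause. The standalone sketch below reproduces that behaviour for illustration only; it is not the actual DefaultCondition.appendMetricNameClause implementation:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class MetricNameClauseSketch {

  // Approximates the clause shapes asserted by DefaultConditionTest above.
  static String buildClause(List<String> metricNames, boolean notCondition) {
    List<String> exact = new ArrayList<>();
    List<String> patterns = new ArrayList<>();
    for (String name : metricNames) {
      if (name.contains("%")) {
        patterns.add(name);
      } else {
        exact.add(name);
      }
    }

    StringBuilder sb = new StringBuilder();
    if (!exact.isEmpty()) {
      sb.append("METRIC_NAME ").append(notCondition ? "NOT IN (" : "IN (");
      for (int i = 0; i < exact.size(); i++) {
        sb.append(i == 0 ? "?" : ", ?");
      }
      sb.append(")");
    }
    String likeJoiner = notCondition ? " AND " : " OR ";
    for (int i = 0; i < patterns.size(); i++) {
      if (sb.length() > 0) {
        sb.append(likeJoiner);
      }
      sb.append("METRIC_NAME ").append(notCondition ? "NOT LIKE ?" : "LIKE ?");
    }
    return sb.length() == 0 ? "" : "(" + sb + ")";
  }

  public static void main(String[] args) {
    System.out.println(buildClause(Arrays.asList("M1", "disk%", "M2"), false));
    // (METRIC_NAME IN (?, ?) OR METRIC_NAME LIKE ?)
    System.out.println(buildClause(Arrays.asList("disk%", "metric1", "cpu%"), true));
    // (METRIC_NAME NOT IN (?) AND METRIC_NAME NOT LIKE ? AND METRIC_NAME NOT LIKE ?)
  }
}

The two prints mirror the mixed IN/LIKE and NOT IN/NOT LIKE expectations asserted in the test.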
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5ad1de8/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
index 68b53f2..2f2e114 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
@@ -744,4 +744,16 @@
</description>
<on-ambari-upgrade add="true"/>
</property>
+ <property>
+ <name>timeline.metrics.cluster.aggregation.sql.filters</name>
+ <value>sdisk\_%,boottime</value>
+ <description>
+ Comma separated list of metric names or Phoenix 'LIKE' clause expressions that match metric names
+ which prevent certain metrics from being aggregated across hosts.
+ </description>
+ <on-ambari-upgrade add="true"/>
+ <value-attributes>
+ <empty-value-valid>true</empty-value-valid>
+ </value-attributes>
+ </property>
</configuration>
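
The default value sdisk\_%,boottime combines an exact metric name with a Phoenix LIKE pattern, which lines up with the NOT IN / NOT LIKE support added earlier in this commit: metrics matching the filters can be excluded from cross-host aggregation. How the collector consumes the property is not shown in this diff; the snippet below is only a hypothetical illustration of splitting the comma separated value into a mutable list (the helper class and method names are made up):

import java.util.ArrayList;
import java.util.List;

public final class AggregationSqlFilterSketch {

  // Splits the raw ams-site value into a mutable list that a
  // NOT IN / NOT LIKE condition could consume.
  static List<String> parseFilters(String rawValue) {
    List<String> filters = new ArrayList<>();
    if (rawValue == null || rawValue.trim().isEmpty()) {
      return filters; // an empty value is valid per <empty-value-valid>
    }
    for (String token : rawValue.split(",")) {
      String trimmed = token.trim();
      if (!trimmed.isEmpty()) {
        filters.add(trimmed);
      }
    }
    return filters;
  }

  public static void main(String[] args) {
    // Default shipped value from the property above.
    System.out.println(parseFilters("sdisk\\_%,boottime")); // [sdisk\_%, boottime]
  }
}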
[3/5] ambari git commit: AMBARI-18132 : Remove FIFO and Normalizer in
ams-env config. (avijayan)
Posted by av...@apache.org.
AMBARI-18132 : Remove FIFO and Normalizer in ams-env config. (avijayan)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0e343585
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0e343585
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0e343585
Branch: refs/heads/trunk
Commit: 0e3435856a2aee7951f765e90e55e027b6455c1a
Parents: 9455b52
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Tue Sep 13 20:32:07 2016 -0700
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Wed Sep 14 15:03:34 2016 -0700
----------------------------------------------------------------------
.../server/upgrade/UpgradeCatalog250.java | 62 ++++++++++-
.../0.1.0/configuration/ams-env.xml | 6 -
.../0.1.0/package/scripts/params.py | 2 -
.../server/upgrade/UpgradeCatalog250Test.java | 110 +++++++++++++++++++
4 files changed, 171 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/0e343585/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
index 43f489b..35c773a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
@@ -18,9 +18,18 @@
package org.apache.ambari.server.upgrade;
import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.orm.dao.DaoUtils;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -33,7 +42,7 @@ import com.google.inject.Injector;
public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
protected static final String HOST_VERSION_TABLE = "host_version";
-
+ private static final String AMS_ENV = "ams-env";
/**
* Logger.
*/
@@ -97,6 +106,7 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
*/
@Override
protected void executeDMLUpdates() throws AmbariException, SQLException {
+ updateAMSConfigs();
}
protected void updateHostVersionTable() throws SQLException {
@@ -106,6 +116,56 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
dbAccessor.addUniqueConstraint(HOST_VERSION_TABLE, "UQ_host_repo", "repo_version_id", "host_id");
}
+ protected void updateAMSConfigs() throws AmbariException {
+ AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+ Clusters clusters = ambariManagementController.getClusters();
+
+ if (clusters != null) {
+ Map<String, Cluster> clusterMap = clusters.getClusters();
+
+ if (clusterMap != null && !clusterMap.isEmpty()) {
+ for (final Cluster cluster : clusterMap.values()) {
+
+ Config amsEnv = cluster.getDesiredConfigByType(AMS_ENV);
+ if (amsEnv != null) {
+ Map<String, String> amsEnvProperties = amsEnv.getProperties();
+ String content = amsEnvProperties.get("content");
+ Map<String, String> newProperties = new HashMap<>();
+ newProperties.put("content", updateAmsEnvContent(content));
+ updateConfigurationPropertiesForCluster(cluster, AMS_ENV, newProperties, true, true);
+ }
+
+ }
+ }
+ }
+ }
+
+
+ protected String updateAmsEnvContent(String content) {
+ if (content == null) {
+ return null;
+ }
+
+ List<String> toReplaceList = new ArrayList<>();
+ toReplaceList.add("\n# HBase normalizer enabled\n");
+ toReplaceList.add("\n# HBase compaction policy enabled\n");
+ toReplaceList.add("export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n");
+ toReplaceList.add("export AMS_HBASE_FIFO_COMPACTION_ENABLED={{ams_hbase_fifo_compaction_enabled}}\n");
+
+ //Because of AMBARI-15331 : AMS HBase FIFO compaction policy and Normalizer settings are not handled correctly
+ toReplaceList.add("export HBASE_NORMALIZATION_ENABLED={{ams_hbase_normalizer_enabled}}\n");
+ toReplaceList.add("export HBASE_FIFO_COMPACTION_POLICY_ENABLED={{ams_hbase_fifo_compaction_policy_enabled}}\n");
+
+
+ for (String toReplace : toReplaceList) {
+ if (content.contains(toReplace)) {
+ content = content.replace(toReplace, StringUtils.EMPTY);
+ }
+ }
+
+ return content;
+ }
+
}
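
updateAmsEnvContent above is plain string surgery: it deletes the known normalizer/FIFO export lines (including the misnamed variants left behind by AMBARI-15331) and leaves the rest of the ams-env content untouched. A self-contained sketch of the same removal technique, reusing two of the marker strings from the method (this is an illustration, not the catalog class itself):

import java.util.ArrayList;
import java.util.List;

public class AmsEnvContentCleanup {

  // Removes each marker string from the content if present.
  static String strip(String content, List<String> toRemove) {
    if (content == null) {
      return null;
    }
    for (String marker : toRemove) {
      if (content.contains(marker)) {
        content = content.replace(marker, "");
      }
    }
    return content;
  }

  public static void main(String[] args) {
    List<String> markers = new ArrayList<>();
    markers.add("\n# HBase normalizer enabled\n");
    markers.add("export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n");

    String content =
        "export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}\n" +
        "\n# HBase normalizer enabled\n" +
        "export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n" +
        "\n# HBase Tables Initialization check enabled\n" +
        "export AMS_HBASE_INIT_CHECK_ENABLED={{ams_hbase_init_check_enabled}}\n";

    System.out.println(strip(content, markers));
    // The normalizer comment and export line are gone; the rest is untouched.
  }
}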
http://git-wip-us.apache.org/repos/asf/ambari/blob/0e343585/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
index a52cb82..48232af 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
@@ -130,12 +130,6 @@ export AMS_HBASE_PID_DIR={{hbase_pid_dir}}
# AMS Collector heapsize
export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}
-# HBase normalizer enabled
-export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}
-
-# HBase compaction policy enabled
-export AMS_HBASE_FIFO_COMPACTION_ENABLED={{ams_hbase_fifo_compaction_enabled}}
-
# HBase Tables Initialization check enabled
export AMS_HBASE_INIT_CHECK_ENABLED={{ams_hbase_init_check_enabled}}
http://git-wip-us.apache.org/repos/asf/ambari/blob/0e343585/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index 61b89f8..22024bb 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -159,8 +159,6 @@ ams_grafana_cert_key = default("/configurations/ams-grafana-ini/cert_key", '/etc
ams_hbase_home_dir = "/usr/lib/ams-hbase/"
-ams_hbase_normalizer_enabled = default("/configurations/ams-hbase-site/hbase.normalizer.enabled", None)
-ams_hbase_fifo_compaction_enabled = default("/configurations/ams-site/timeline.metrics.hbase.fifo.compaction.enabled", None)
ams_hbase_init_check_enabled = default("/configurations/ams-site/timeline.metrics.hbase.init.check.enabled", True)
#hadoop params
http://git-wip-us.apache.org/repos/asf/ambari/blob/0e343585/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
index 6dadb22..c4e0a7c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
@@ -28,8 +28,21 @@ import static org.easymock.EasyMock.verify;
import javax.persistence.EntityManager;
+import com.google.common.collect.Maps;
+import com.google.gson.Gson;
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.controller.MaintenanceStateHelper;
import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.stack.OsFamily;
+import org.easymock.Capture;
+import org.easymock.EasyMock;
+import org.easymock.EasyMockSupport;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -40,6 +53,15 @@ import com.google.inject.Injector;
import com.google.inject.Module;
import com.google.inject.Provider;
+import java.lang.reflect.Method;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.anyString;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createMockBuilder;
+import static org.junit.Assert.assertTrue;
/**
* {@link UpgradeCatalog250} unit tests.
*/
@@ -86,4 +108,92 @@ public class UpgradeCatalog250Test {
verify(dbAccessor);
}
+ @Test
+ public void testExecuteDMLUpdates() throws Exception {
+ Method updateAmsConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateAMSConfigs");
+
+ UpgradeCatalog250 upgradeCatalog250 = createMockBuilder(UpgradeCatalog250.class)
+ .addMockedMethod(updateAmsConfigs)
+ .createMock();
+
+ upgradeCatalog250.updateAMSConfigs();
+ expectLastCall().once();
+
+ replay(upgradeCatalog250);
+
+ upgradeCatalog250.executeDMLUpdates();
+
+ verify(upgradeCatalog250);
+ }
+
+ @Test
+ public void testAmsEnvUpdateConfigs() throws Exception{
+
+ Map<String, String> oldPropertiesAmsEnv = new HashMap<String, String>() {
+ {
+ put("content", "\n" +
+ "# AMS Collector heapsize\n" +
+ "export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}\n" +
+ "\n" +
+ "# HBase normalizer enabled\n" +
+ "export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n" +
+ "\n" +
+ "# HBase compaction policy enabled\n" +
+ "export HBASE_FIFO_COMPACTION_POLICY_ENABLED={{ams_hbase_fifo_compaction_policy_enabled}}\n" +
+ "\n" +
+ "# HBase Tables Initialization check enabled\n" +
+ "export AMS_HBASE_INIT_CHECK_ENABLED={{ams_hbase_init_check_enabled}}\n");
+ }
+ };
+ Map<String, String> newPropertiesAmsEnv = new HashMap<String, String>() {
+ {
+ put("content", "\n" +
+ "# AMS Collector heapsize\n" +
+ "export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}\n" +
+ "\n" +
+ "# HBase Tables Initialization check enabled\n" +
+ "export AMS_HBASE_INIT_CHECK_ENABLED={{ams_hbase_init_check_enabled}}\n");
+ }
+ };
+ EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+ Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
+ final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
+ Config mockAmsEnv = easyMockSupport.createNiceMock(Config.class);
+
+ expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+ put("normal", cluster);
+ }}).once();
+ expect(cluster.getDesiredConfigByType("ams-env")).andReturn(mockAmsEnv).atLeastOnce();
+ expect(mockAmsEnv.getProperties()).andReturn(oldPropertiesAmsEnv).anyTimes();
+
+ Injector injector = easyMockSupport.createNiceMock(Injector.class);
+ expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
+ expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
+ expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
+
+ replay(injector, clusters, mockAmsEnv, cluster);
+
+ AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
+ .addMockedMethod("createConfiguration")
+ .addMockedMethod("getClusters", new Class[] { })
+ .addMockedMethod("createConfig")
+ .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
+ .createNiceMock();
+
+ Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
+ Capture<Map> propertiesCapture = EasyMock.newCapture();
+
+ expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+ expect(controller.getClusters()).andReturn(clusters).anyTimes();
+ expect(controller.createConfig(anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ anyObject(Map.class))).andReturn(createNiceMock(Config.class)).once();
+
+ replay(controller, injector2);
+ new UpgradeCatalog250(injector2).updateAMSConfigs();
+ easyMockSupport.verifyAll();
+
+ Map<String, String> updatedProperties = propertiesCapture.getValue();
+ assertTrue(Maps.difference(newPropertiesAmsEnv, updatedProperties).areEqual());
+ }
}