Posted to commits@ambari.apache.org by sw...@apache.org on 2015/04/03 00:00:46 UTC

[1/7] ambari git commit: AMBARI-10290. Expose available host metrics across hostcomponents. Support HostComponent host metrics. (swagle)

Repository: ambari
Updated Branches:
  refs/heads/trunk 81f311baf -> 23faabc4a


http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/RestMetricsPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/RestMetricsPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/RestMetricsPropertyProviderTest.java
index c50ccba..b6af863 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/RestMetricsPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/RestMetricsPropertyProviderTest.java
@@ -72,14 +72,14 @@ public class RestMetricsPropertyProviderTest {
     metricsProperties.put("port_config_type", "storm-site");
     metricsProperties.put("port_property_name", "storm.port");
     metricsProperties.put("protocol", "http");
-    componentMetrics.put("metrics/api/cluster/summary/tasks.total", new Metric("/api/cluster/summary##tasks.total", false, false));
-    componentMetrics.put("metrics/api/cluster/summary/slots.total", new Metric("/api/cluster/summary##slots.total", false, false));
-    componentMetrics.put("metrics/api/cluster/summary/slots.free", new Metric("/api/cluster/summary##slots.free", false, false));
-    componentMetrics.put("metrics/api/cluster/summary/supervisors", new Metric("/api/cluster/summary##supervisors", false, false));
-    componentMetrics.put("metrics/api/cluster/summary/executors.total", new Metric("/api/cluster/summary##executors.total", false, false));
-    componentMetrics.put("metrics/api/cluster/summary/slots.used", new Metric("/api/cluster/summary##slots.used", false, false));
-    componentMetrics.put("metrics/api/cluster/summary/topologies", new Metric("/api/cluster/summary##topologies", false, false));
-    componentMetrics.put("metrics/api/cluster/summary/nimbus.uptime", new Metric("/api/cluster/summary##nimbus.uptime", false, false));
+    componentMetrics.put("metrics/api/cluster/summary/tasks.total", new Metric("/api/cluster/summary##tasks.total", false, false, false));
+    componentMetrics.put("metrics/api/cluster/summary/slots.total", new Metric("/api/cluster/summary##slots.total", false, false, false));
+    componentMetrics.put("metrics/api/cluster/summary/slots.free", new Metric("/api/cluster/summary##slots.free", false, false, false));
+    componentMetrics.put("metrics/api/cluster/summary/supervisors", new Metric("/api/cluster/summary##supervisors", false, false, false));
+    componentMetrics.put("metrics/api/cluster/summary/executors.total", new Metric("/api/cluster/summary##executors.total", false, false, false));
+    componentMetrics.put("metrics/api/cluster/summary/slots.used", new Metric("/api/cluster/summary##slots.used", false, false, false));
+    componentMetrics.put("metrics/api/cluster/summary/topologies", new Metric("/api/cluster/summary##topologies", false, false, false));
+    componentMetrics.put("metrics/api/cluster/summary/nimbus.uptime", new Metric("/api/cluster/summary##nimbus.uptime", false, false, false));
   }
 
 

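Note on the hunk above: the Metric constructor now takes a fourth boolean. Judging by the metrics.json change later in this commit (the new "amsHostMetric" key alongside "pointInTime" and "temporal"), the extra argument most likely carries that flag. A minimal sketch of the assumed constructor shape; the field names are inferred from the metrics.json keys, not taken from the real Metric source, which is not part of this diff:

    // Assumed shape only -- inferred from the four-argument calls above and
    // the pointInTime/temporal/amsHostMetric keys used in metrics.json.
    public class Metric {
      private final String metric;
      private final boolean pointInTime;
      private final boolean temporal;
      private final boolean amsHostMetric; // new flag added by AMBARI-10290

      public Metric(String metric, boolean pointInTime, boolean temporal,
                    boolean amsHostMetric) {
        this.metric = metric;
        this.pointInTime = pointInTime;
        this.temporal = temporal;
        this.amsHostMetric = amsHostMetric;
      }
    }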
http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/TestStreamProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/TestStreamProvider.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/TestStreamProvider.java
index f6ac02b..07fa7c7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/TestStreamProvider.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/TestStreamProvider.java
@@ -24,8 +24,8 @@ import java.io.IOException;
 import java.io.InputStream;
 
 public class TestStreamProvider implements StreamProvider {
-
-  private final String fileName;
+  // Allow for filename to be set at runtime
+  protected String fileName;
   private String lastSpec;
   private boolean isLastSpecUpdated;
 

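The field change above (private final -> protected, with the runtime comment) exists so a subclass can switch the fixture file per request; the new TestStreamProviderForHostComponentHostMetricsTest in the next file does exactly that. In miniature, the pattern looks like this (file names and the spec check are hypothetical; the base class is assumed to read from this.fileName):

    import java.io.IOException;
    import java.io.InputStream;

    // Sketch: pick the fixture based on the requested spec, then delegate.
    class SwitchingStreamProvider extends TestStreamProvider {
      SwitchingStreamProvider() { super(null); }

      @Override
      public InputStream readFrom(String spec) throws IOException {
        this.fileName = spec.contains("appId=HOST") ? "host.json" : "component.json";
        return super.readFrom(spec);
      }
    }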
http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java
index 4e2ed98..052f86c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java
@@ -33,6 +33,8 @@ import org.junit.Assert;
 import org.junit.Test;
 
 import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -46,6 +48,7 @@ import static org.mockito.Mockito.mock;
 public class AMSPropertyProviderTest {
   private static final String PROPERTY_ID1 = PropertyHelper.getPropertyId("metrics/cpu", "cpu_user");
   private static final String PROPERTY_ID2 = PropertyHelper.getPropertyId("metrics/memory", "mem_free");
+  private static final String PROPERTY_ID3 = PropertyHelper.getPropertyId("metrics/dfs/datanode", "blocks_replicated");
   private static final String CLUSTER_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
   private static final String HOST_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "host_name");
   private static final String COMPONENT_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "component_name");
@@ -98,8 +101,7 @@ public class AMSPropertyProviderTest {
   }
 
   @Test
-  public void testPopulateResourcesForSingleHostMetricPointInTime() throws
-    Exception {
+  public void testPopulateResourcesForSingleHostMetricPointInTime() throws Exception {
 
     // given
     TestStreamProvider streamProvider = new TestStreamProvider(SINGLE_HOST_METRICS_FILE_PATH);
@@ -176,7 +178,6 @@ public class AMSPropertyProviderTest {
     uriBuilder2.addParameter("metricNames", "mem_free,cpu_user");
     uriBuilder2.addParameter("hostname", "h1");
     uriBuilder2.addParameter("appId", "HOST");
-    System.out.println(streamProvider.getLastSpec());
     Assert.assertTrue(uriBuilder.toString().equals(streamProvider.getLastSpec())
         || uriBuilder2.toString().equals(streamProvider.getLastSpec()));
     Double val1 = (Double) res.getPropertyValue(PROPERTY_ID1);
@@ -185,7 +186,6 @@ public class AMSPropertyProviderTest {
     Assert.assertEquals(2.47025664E8, val2, 0.1);
   }
 
-
   @Test
   public void testPopulateResourcesForMultipleHostMetrics() throws Exception {
     TestStreamProvider streamProvider = new TestStreamProvider(MULTIPLE_HOST_METRICS_FILE_PATH);
@@ -236,7 +236,6 @@ public class AMSPropertyProviderTest {
     Assert.assertEquals(86, val.length);
   }
 
-
   @Test
   public void testPopulateResourcesForRegexpMetrics() throws Exception {
     TestStreamProvider streamProvider = new TestStreamProvider(MULTIPLE_COMPONENT_REGEXP_METRICS_FILE_PATH);
@@ -369,6 +368,103 @@ public class AMSPropertyProviderTest {
     Assert.assertEquals(188, val.length);
   }
 
+  static class TestStreamProviderForHostComponentHostMetricsTest extends TestStreamProvider {
+    String hostMetricFilePath = FILE_PATH_PREFIX + "single_host_metric.json";
+    String hostComponentMetricFilePath = FILE_PATH_PREFIX + "single_host_component_metrics.json";
+    Set<String> specs = new HashSet<String>();
+
+    public TestStreamProviderForHostComponentHostMetricsTest(String fileName) {
+      super(fileName);
+    }
+
+    @Override
+    public InputStream readFrom(String spec) throws IOException {
+      if (spec.contains("HOST")) {
+        this.fileName = hostMetricFilePath;
+      } else {
+        this.fileName = hostComponentMetricFilePath;
+      }
+
+      specs.add(spec);
+
+      return super.readFrom(spec);
+    }
+
+    public Set<String> getAllSpecs() {
+      return specs;
+    }
+  }
+
+  @Test
+  public void testPopulateResourcesForHostComponentHostMetrics() throws Exception {
+    TestStreamProviderForHostComponentHostMetricsTest streamProvider =
+      new TestStreamProviderForHostComponentHostMetricsTest(null);
+    TestMetricHostProvider metricHostProvider = new TestMetricHostProvider();
+    ComponentSSLConfiguration sslConfiguration = mock(ComponentSSLConfiguration.class);
+
+    Map<String, Map<String, PropertyInfo>> propertyIds = PropertyHelper.getMetricPropertyIds(Resource.Type.HostComponent);
+    AMSPropertyProvider propertyProvider = new AMSHostComponentPropertyProvider(
+      propertyIds,
+      streamProvider,
+      sslConfiguration,
+      metricHostProvider,
+      CLUSTER_NAME_PROPERTY_ID,
+      HOST_NAME_PROPERTY_ID,
+      COMPONENT_NAME_PROPERTY_ID
+    );
+
+    Resource resource = new ResourceImpl(Resource.Type.Host);
+    resource.setProperty(HOST_NAME_PROPERTY_ID, "h1");
+    resource.setProperty(COMPONENT_NAME_PROPERTY_ID, "DATANODE");
+    Map<String, TemporalInfo> temporalInfoMap = new HashMap<String, TemporalInfo>();
+    temporalInfoMap.put(PROPERTY_ID1, new TemporalInfoImpl(1416445244701L, 1416445251802L, 1L));
+    temporalInfoMap.put(PROPERTY_ID3, new TemporalInfoImpl(1416445244701L, 1416445251802L, 1L));
+    Request request = PropertyHelper.getReadRequest(
+      new HashSet<String>() {{ add(PROPERTY_ID1); add(PROPERTY_ID3); }},
+      temporalInfoMap);
+    Set<Resource> resources =
+      propertyProvider.populateResources(Collections.singleton(resource), request, null);
+    Assert.assertEquals(1, resources.size());
+    Resource res = resources.iterator().next();
+    Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
+    Assert.assertNotNull(properties);
+
+    Set<String> specs = streamProvider.getAllSpecs();
+    Assert.assertEquals(2, specs.size());
+    String hostMetricSpec = null;
+    String hostComponentMetricsSpec = null;
+    for (String spec : specs) {
+      if (spec.contains("HOST")) {
+        hostMetricSpec = spec;
+      } else {
+        hostComponentMetricsSpec = spec;
+      }
+    }
+    Assert.assertNotNull(hostMetricSpec);
+    Assert.assertNotNull(hostComponentMetricsSpec);
+    // Verify calls
+    URIBuilder uriBuilder1 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    uriBuilder1.addParameter("metricNames", "dfs.datanode.BlocksReplicated");
+    uriBuilder1.addParameter("hostname", "h1");
+    uriBuilder1.addParameter("appId", "DATANODE");
+    uriBuilder1.addParameter("startTime", "1416445244701");
+    uriBuilder1.addParameter("endTime", "1416445251802");
+    Assert.assertEquals(uriBuilder1.toString(), hostComponentMetricsSpec);
+
+    URIBuilder uriBuilder2 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    uriBuilder2.addParameter("metricNames", "cpu_user");
+    uriBuilder2.addParameter("hostname", "h1");
+    uriBuilder2.addParameter("appId", "HOST");
+    uriBuilder2.addParameter("startTime", "1416445244701");
+    uriBuilder2.addParameter("endTime", "1416445251802");
+    Assert.assertEquals(uriBuilder2.toString(), hostMetricSpec);
+
+    Number[][] val = (Number[][]) res.getPropertyValue(PROPERTY_ID1);
+    Assert.assertEquals(111, val.length);
+    val = (Number[][]) res.getPropertyValue(PROPERTY_ID3);
+    Assert.assertEquals(8, val.length);
+  }
+
   public static class TestMetricHostProvider implements MetricHostProvider {
 
     @Override

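For reference, the two specs the new test asserts: AMSPropertyProvider.getAMSUriBuilder("localhost", 8188) is assumed to target the AMS timeline endpoint (the /ws/v1/timeline/metrics path below is an assumption; the query parameters are taken verbatim from the test):

    http://localhost:8188/ws/v1/timeline/metrics?metricNames=dfs.datanode.BlocksReplicated&hostname=h1&appId=DATANODE&startTime=1416445244701&endTime=1416445251802
    http://localhost:8188/ws/v1/timeline/metrics?metricNames=cpu_user&hostname=h1&appId=HOST&startTime=1416445244701&endTime=1416445251802

The split is the point of the change: a single HostComponent request fans out into one AMS call per appId, so host-scoped metrics (appId=HOST) can be served alongside component metrics (appId=DATANODE).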
http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/test/resources/ams/single_host_component_metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/ams/single_host_component_metrics.json b/ambari-server/src/test/resources/ams/single_host_component_metrics.json
new file mode 100644
index 0000000..f3e6554
--- /dev/null
+++ b/ambari-server/src/test/resources/ams/single_host_component_metrics.json
@@ -0,0 +1,21 @@
+{
+  "metrics": [
+  {
+    "timestamp": 1416445244801,
+    "type": "Long",
+    "metricname": "dfs.datanode.BlocksReplicated",
+    "appid": "datanode",
+    "hostname": "c6402.ambari.apache.org",
+    "starttime": 1416445244801,
+    "metrics": {
+      "1416445244801": 0.0,
+      "1416445245801": 0.0,
+      "1416445246801": 0.0,
+      "1416445247801": 0.0,
+      "1416445248801": 0.0,
+      "1416445249801": 0.0,
+      "1416445250801": 0.0,
+      "1416445251801": 0.0
+    }
+  }
+]}
\ No newline at end of file
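The fixture mirrors the AMS response shape: a top-level "metrics" array whose entries carry a metricname/appid/hostname header plus a map of epoch-millisecond timestamps to values (eight points here, matching the assertEquals(8, val.length) in the test above). A hedged sketch of deserializing it with Jackson; the POJO names are hypothetical:

    import com.fasterxml.jackson.databind.DeserializationFeature;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.io.File;
    import java.util.List;
    import java.util.Map;

    // Hypothetical POJOs matching the fixture layout above.
    class MetricEntry {
      public long timestamp;
      public String metricname;
      public String appid;
      public String hostname;
      public long starttime;
      public Map<String, Double> metrics; // epoch millis (as string) -> value
    }

    class TimelineMetricsDoc {
      public List<MetricEntry> metrics;
    }

    // Usage: ignore fields the POJOs do not model (e.g. "type").
    TimelineMetricsDoc doc = new ObjectMapper()
        .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
        .readValue(new File("single_host_component_metrics.json"), TimelineMetricsDoc.class);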

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/test/resources/ams/single_host_metric.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/ams/single_host_metric.json b/ambari-server/src/test/resources/ams/single_host_metric.json
index e8b7c14..83f2cdf 100644
--- a/ambari-server/src/test/resources/ams/single_host_metric.json
+++ b/ambari-server/src/test/resources/ams/single_host_metric.json
@@ -1,122 +1,124 @@
-{"metrics": [
-    {
-        "timestamp": 1416445244801,
-        "metricname": "cpu_user",
-        "appid": "HOST",
-        "starttime": 1416445244801,
-        "metrics": {
-          "1416445244801": 4006.085,
-          "1416445259801": 4006.29,
-          "1416445274801": 4006.6475,
-          "1416445289801": 4006.9474999999998,
-          "1416445304801": 4007.1025,
-          "1416445319801": 4007.21,
-          "1416445364801": 4007.8375,
-          "1416445379801": 4008.0425000000005,
-          "1416445394801": 4008.37,
-          "1416445409801": 4008.9474999999998,
-          "1416445424801": 4009.08,
-          "1416445439801": 4009.185,
-          "1416445484801": 4009.9525,
-          "1416445499801": 4010.19,
-          "1416445514801": 4010.8725,
-          "1416445529801": 4011.3424999999997,
-          "1416445544801": 4011.4975000000004,
-          "1416445559801": 4011.665,
-          "1416445604801": 4012.57,
-          "1416445619801": 4012.9325,
-          "1416445634801": 4013.3424999999997,
-          "1416445649801": 4013.9225,
-          "1416445664801": 4014.1949999999997,
-          "1416445679801": 4014.58,
-          "1416445724801": 4015.73,
-          "1416445739801": 4015.8599999999997,
-          "1416445964801": 4020.1075,
-          "1416445979801": 4020.2975,
-          "1416445994801": 4020.4624999999996,
-          "1416446009801": 4020.825,
-          "1416446024801": 4020.96,
-          "1416446084801": 4021.9725,
-          "1416446099801": 4022.2,
-          "1416446114801": 4022.3775,
-          "1416446129801": 4022.7275,
-          "1416446144801": 4022.98,
-          "1416446204801": 4023.9425,
-          "1416446219801": 4024.245,
-          "1416446234801": 4024.5275,
-          "1416446249801": 4024.925,
-          "1416446264801": 4025.03,
-          "1416446324801": 4026.2075,
-          "1416446339801": 4026.4125,
-          "1416446354801": 4026.5275,
-          "1416446369801": 4026.915,
-          "1416446384801": 4027.0550000000003,
-          "1416446444801": 4028.0975,
-          "1416446459801": 4028.2799999999997,
-          "1416446474801": 4028.4275,
-          "1416446489801": 4028.8375,
-          "1416446504801": 4028.98,
-          "1416446564801": 4030.005,
-          "1416446579801": 4030.1800000000003,
-          "1416446594801": 4030.365,
-          "1416446609801": 4030.745,
-          "1416446624801": 4030.895,
-          "1416446684801": 4031.7825,
-          "1416446699801": 4032.0175,
-          "1416446714801": 4032.2025,
-          "1416446729801": 4032.5525,
-          "1416446744801": 4032.82,
-          "1416446804801": 4033.7475,
-          "1416446819801": 4033.9700000000003,
-          "1416446834801": 4034.1425,
-          "1416446849801": 4034.4725,
-          "1416446864801": 4034.635,
-          "1416446924801": 4035.54,
-          "1416446939801": 4035.7175,
-          "1416446954801": 4035.885,
-          "1416446969801": 4036.255,
-          "1416446984801": 4036.435,
-          "1416447044801": 4037.8475,
-          "1416447059801": 4038.045,
-          "1416447074801": 4038.2025,
-          "1416447089801": 4038.7,
-          "1416447104801": 4038.855,
-          "1416447164801": 4039.7375,
-          "1416447179801": 4039.9025,
-          "1416447194801": 4040.08,
-          "1416447209801": 4040.42,
-          "1416447224801": 4040.59,
-          "1416447284801": 4041.51,
-          "1416447299801": 4041.85,
-          "1416447314801": 4042.6525,
-          "1416447329801": 4043.1025,
-          "1416447344801": 4043.29,
-          "1416447404801": 4044.265,
-          "1416447419801": 4044.52,
-          "1416447434801": 4044.705,
-          "1416447449801": 4045.09,
-          "1416447464801": 4045.325,
-          "1416447524801": 4046.2525,
-          "1416447539801": 4046.3975,
-          "1416447554801": 4046.55,
-          "1416447569801": 4046.865,
-          "1416447584801": 4047.0550000000003,
-          "1416447644801": 4049.255,
-          "1416448681363": 4105.1125,
-          "1416448696363": 4105.3625,
-          "1416448711363": 4105.6875,
-          "1416448726363": 4105.842500000001,
-          "1416448741363": 4105.9574999999995,
-          "1416448756363": 4106.07,
-          "1416448801363": 4106.74,
-          "1416448816363": 4106.9175,
-          "1416448831363": 4107.2,
-          "1416448846363": 4107.3525,
-          "1416448861363": 4107.545,
-          "1416448876363": 4107.65,
-          "1416448921363": 4109.2625,
-          "1416448936363": 4109.4,
-          "1416448936464": 4108.2
-        }
+{
+  "metrics": [
+  {
+    "timestamp": 1416445244801,
+    "metricname": "cpu_user",
+    "appid": "HOST",
+    "hostname": "c6402.ambari.apache.org",
+    "starttime": 1416445244801,
+    "metrics": {
+      "1416445244801": 4006.085,
+      "1416445259801": 4006.29,
+      "1416445274801": 4006.6475,
+      "1416445289801": 4006.9474999999998,
+      "1416445304801": 4007.1025,
+      "1416445319801": 4007.21,
+      "1416445364801": 4007.8375,
+      "1416445379801": 4008.0425000000005,
+      "1416445394801": 4008.37,
+      "1416445409801": 4008.9474999999998,
+      "1416445424801": 4009.08,
+      "1416445439801": 4009.185,
+      "1416445484801": 4009.9525,
+      "1416445499801": 4010.19,
+      "1416445514801": 4010.8725,
+      "1416445529801": 4011.3424999999997,
+      "1416445544801": 4011.4975000000004,
+      "1416445559801": 4011.665,
+      "1416445604801": 4012.57,
+      "1416445619801": 4012.9325,
+      "1416445634801": 4013.3424999999997,
+      "1416445649801": 4013.9225,
+      "1416445664801": 4014.1949999999997,
+      "1416445679801": 4014.58,
+      "1416445724801": 4015.73,
+      "1416445739801": 4015.8599999999997,
+      "1416445964801": 4020.1075,
+      "1416445979801": 4020.2975,
+      "1416445994801": 4020.4624999999996,
+      "1416446009801": 4020.825,
+      "1416446024801": 4020.96,
+      "1416446084801": 4021.9725,
+      "1416446099801": 4022.2,
+      "1416446114801": 4022.3775,
+      "1416446129801": 4022.7275,
+      "1416446144801": 4022.98,
+      "1416446204801": 4023.9425,
+      "1416446219801": 4024.245,
+      "1416446234801": 4024.5275,
+      "1416446249801": 4024.925,
+      "1416446264801": 4025.03,
+      "1416446324801": 4026.2075,
+      "1416446339801": 4026.4125,
+      "1416446354801": 4026.5275,
+      "1416446369801": 4026.915,
+      "1416446384801": 4027.0550000000003,
+      "1416446444801": 4028.0975,
+      "1416446459801": 4028.2799999999997,
+      "1416446474801": 4028.4275,
+      "1416446489801": 4028.8375,
+      "1416446504801": 4028.98,
+      "1416446564801": 4030.005,
+      "1416446579801": 4030.1800000000003,
+      "1416446594801": 4030.365,
+      "1416446609801": 4030.745,
+      "1416446624801": 4030.895,
+      "1416446684801": 4031.7825,
+      "1416446699801": 4032.0175,
+      "1416446714801": 4032.2025,
+      "1416446729801": 4032.5525,
+      "1416446744801": 4032.82,
+      "1416446804801": 4033.7475,
+      "1416446819801": 4033.9700000000003,
+      "1416446834801": 4034.1425,
+      "1416446849801": 4034.4725,
+      "1416446864801": 4034.635,
+      "1416446924801": 4035.54,
+      "1416446939801": 4035.7175,
+      "1416446954801": 4035.885,
+      "1416446969801": 4036.255,
+      "1416446984801": 4036.435,
+      "1416447044801": 4037.8475,
+      "1416447059801": 4038.045,
+      "1416447074801": 4038.2025,
+      "1416447089801": 4038.7,
+      "1416447104801": 4038.855,
+      "1416447164801": 4039.7375,
+      "1416447179801": 4039.9025,
+      "1416447194801": 4040.08,
+      "1416447209801": 4040.42,
+      "1416447224801": 4040.59,
+      "1416447284801": 4041.51,
+      "1416447299801": 4041.85,
+      "1416447314801": 4042.6525,
+      "1416447329801": 4043.1025,
+      "1416447344801": 4043.29,
+      "1416447404801": 4044.265,
+      "1416447419801": 4044.52,
+      "1416447434801": 4044.705,
+      "1416447449801": 4045.09,
+      "1416447464801": 4045.325,
+      "1416447524801": 4046.2525,
+      "1416447539801": 4046.3975,
+      "1416447554801": 4046.55,
+      "1416447569801": 4046.865,
+      "1416447584801": 4047.0550000000003,
+      "1416447644801": 4049.255,
+      "1416448681363": 4105.1125,
+      "1416448696363": 4105.3625,
+      "1416448711363": 4105.6875,
+      "1416448726363": 4105.842500000001,
+      "1416448741363": 4105.9574999999995,
+      "1416448756363": 4106.07,
+      "1416448801363": 4106.74,
+      "1416448816363": 4106.9175,
+      "1416448831363": 4107.2,
+      "1416448846363": 4107.3525,
+      "1416448861363": 4107.545,
+      "1416448876363": 4107.65,
+      "1416448921363": 4109.2625,
+      "1416448936363": 4109.4,
+      "1416448936464": 4108.2
     }
+  }
 ]}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/test/resources/stacks/OTHER/1.0/services/HDFS/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/services/HDFS/metrics.json b/ambari-server/src/test/resources/stacks/OTHER/1.0/services/HDFS/metrics.json
index bb9dca9..0040926 100644
--- a/ambari-server/src/test/resources/stacks/OTHER/1.0/services/HDFS/metrics.json
+++ b/ambari-server/src/test/resources/stacks/OTHER/1.0/services/HDFS/metrics.json
@@ -2777,7 +2777,8 @@
             "metrics/dfs/datanode/heartBeats_avg_time": {
               "metric": "dfs.datanode.HeartbeatsAvgTime",
               "pointInTime": true,
-              "temporal": true
+              "temporal": true,
+              "amsHostMetric":true
             },
             "metrics/rpc/closeRegion_num_ops": {
               "metric": "rpc.rpc.closeRegion_num_ops",

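The new "amsHostMetric" flag appears to mark a metric definition that AMS serves at host scope (appId=HOST) even when it is requested through a host component; that matches the new AMSPropertyProviderTest above, where cpu_user goes out with appId=HOST while blocks_replicated keeps appId=DATANODE. A hedged example of flagging a host-level metric inside a component's metrics.json (entry invented for illustration):

    "metrics/cpu/cpu_user": {
      "metric": "cpu_user",
      "pointInTime": true,
      "temporal": true,
      "amsHostMetric": true
    }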

[6/7] ambari git commit: AMBARI-10290. Expose available host metrics across hostcomponents. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineClusterMetricReader.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineClusterMetricReader.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineClusterMetricReader.java
deleted file mode 100644
index ea0913e..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineClusterMetricReader.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-public class TimelineClusterMetricReader {
-
-  private boolean ignoreInstance;
-
-  public TimelineClusterMetricReader(boolean ignoreInstance) {
-    this.ignoreInstance = ignoreInstance;
-  }
-
-  public TimelineClusterMetric fromResultSet(ResultSet rs)
-    throws SQLException {
-
-    return new TimelineClusterMetric(
-      rs.getString("METRIC_NAME"),
-      rs.getString("APP_ID"),
-      ignoreInstance ? null : rs.getString("INSTANCE_ID"),
-      rs.getLong("SERVER_TIME"),
-      rs.getString("UNITS"));
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregator.java
deleted file mode 100644
index 459e612..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregator.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
-import java.io.IOException;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.HashMap;
-import java.util.Map;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.DefaultCondition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.GET_METRIC_AGGREGATE_ONLY_SQL;
-
-public class TimelineMetricAggregator extends AbstractTimelineAggregator {
-  private static final Log LOG = LogFactory.getLog
-    (TimelineMetricAggregator.class);
-
-  private final String checkpointLocation;
-  private final Long sleepIntervalMillis;
-  private final Integer checkpointCutOffMultiplier;
-  private final String hostAggregatorDisabledParam;
-  private final String tableName;
-  private final String outputTableName;
-  private final Long nativeTimeRangeDelay;
-
-  public TimelineMetricAggregator(PhoenixHBaseAccessor hBaseAccessor,
-                                  Configuration metricsConf,
-                                  String checkpointLocation,
-                                  Long sleepIntervalMillis,
-                                  Integer checkpointCutOffMultiplier,
-                                  String hostAggregatorDisabledParam,
-                                  String tableName,
-                                  String outputTableName,
-                                  Long nativeTimeRangeDelay) {
-    super(hBaseAccessor, metricsConf);
-    this.checkpointLocation = checkpointLocation;
-    this.sleepIntervalMillis = sleepIntervalMillis;
-    this.checkpointCutOffMultiplier = checkpointCutOffMultiplier;
-    this.hostAggregatorDisabledParam = hostAggregatorDisabledParam;
-    this.tableName = tableName;
-    this.outputTableName = outputTableName;
-    this.nativeTimeRangeDelay =  nativeTimeRangeDelay;
-  }
-
-  @Override
-  protected String getCheckpointLocation() {
-    return checkpointLocation;
-  }
-
-  @Override
-  protected void aggregate(ResultSet rs, long startTime, long endTime)
-    throws IOException, SQLException {
-    Map<TimelineMetric, MetricHostAggregate> hostAggregateMap =
-      aggregateMetricsFromResultSet(rs);
-
-    LOG.info("Saving " + hostAggregateMap.size() + " metric aggregates.");
-    hBaseAccessor.saveHostAggregateRecords(hostAggregateMap,
-      outputTableName);
-  }
-
-  @Override
-  protected Condition prepareMetricQueryCondition(long startTime, long endTime) {
-    Condition condition = new DefaultCondition(null, null, null, null, startTime,
-      endTime, null, null, true);
-    condition.setNoLimit();
-    condition.setFetchSize(resultsetFetchSize);
-    condition.setStatement(String.format(GET_METRIC_AGGREGATE_ONLY_SQL,
-      PhoenixTransactSQL.getNaiveTimeRangeHint(startTime, nativeTimeRangeDelay),
-      tableName));
-    condition.addOrderByColumn("METRIC_NAME");
-    condition.addOrderByColumn("HOSTNAME");
-    condition.addOrderByColumn("APP_ID");
-    condition.addOrderByColumn("INSTANCE_ID");
-    condition.addOrderByColumn("SERVER_TIME");
-    return condition;
-  }
-
-  private Map<TimelineMetric, MetricHostAggregate> aggregateMetricsFromResultSet
-      (ResultSet rs) throws IOException, SQLException {
-    TimelineMetric existingMetric = null;
-    MetricHostAggregate hostAggregate = null;
-    Map<TimelineMetric, MetricHostAggregate> hostAggregateMap =
-      new HashMap<TimelineMetric, MetricHostAggregate>();
-
-    while (rs.next()) {
-      TimelineMetric currentMetric =
-        PhoenixHBaseAccessor.getTimelineMetricKeyFromResultSet(rs);
-      MetricHostAggregate currentHostAggregate =
-        PhoenixHBaseAccessor.getMetricHostAggregateFromResultSet(rs);
-
-      if (existingMetric == null) {
-        // First row
-        existingMetric = currentMetric;
-        hostAggregate = new MetricHostAggregate();
-        hostAggregateMap.put(currentMetric, hostAggregate);
-      }
-
-      if (existingMetric.equalsExceptTime(currentMetric)) {
-        // Recalculate totals with current metric
-        hostAggregate.updateAggregates(currentHostAggregate);
-      } else {
-        // Switched over to a new metric - save existing - create new aggregate
-        hostAggregate = new MetricHostAggregate();
-        hostAggregate.updateAggregates(currentHostAggregate);
-        hostAggregateMap.put(currentMetric, hostAggregate);
-        existingMetric = currentMetric;
-      }
-    }
-    return hostAggregateMap;
-  }
-
-  @Override
-  protected Long getSleepIntervalMillis() {
-    return sleepIntervalMillis;
-  }
-
-  @Override
-  protected Integer getCheckpointCutOffMultiplier() {
-    return checkpointCutOffMultiplier;
-  }
-
-  @Override
-  protected boolean isDisabled() {
-    return metricsConf.getBoolean(hostAggregatorDisabledParam, false);
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregatorFactory.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregatorFactory.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregatorFactory.java
deleted file mode 100644
index 1dfd3e6..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregatorFactory.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-import org.apache.commons.io.FilenameUtils;
-import org.apache.hadoop.conf.Configuration;
-import static java.util.concurrent.TimeUnit.SECONDS;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_AGGREGATE_HOURLY_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.DEFAULT_CHECKPOINT_LOCATION;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_HOUR_DISABLED;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_HOUR_SLEEP_INTERVAL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_MINUTE_DISABLED;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_MINUTE_SLEEP_INTERVAL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR;
-
-/**
- *
- */
-public class TimelineMetricAggregatorFactory {
-  private static final String MINUTE_AGGREGATE_CHECKPOINT_FILE =
-    "timeline-metrics-host-aggregator-checkpoint";
-  private static final String MINUTE_AGGREGATE_HOURLY_CHECKPOINT_FILE =
-    "timeline-metrics-host-aggregator-hourly-checkpoint";
-
-  public static TimelineMetricAggregator createTimelineMetricAggregatorMinute
-    (PhoenixHBaseAccessor hBaseAccessor, Configuration metricsConf) {
-
-    String checkpointDir = metricsConf.get(
-      TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR, DEFAULT_CHECKPOINT_LOCATION);
-    String checkpointLocation = FilenameUtils.concat(checkpointDir,
-      MINUTE_AGGREGATE_CHECKPOINT_FILE);
-    long sleepIntervalMillis = SECONDS.toMillis(metricsConf.getLong
-      (HOST_AGGREGATOR_MINUTE_SLEEP_INTERVAL, 300l));  // 5 mins
-
-    int checkpointCutOffMultiplier = metricsConf.getInt
-      (HOST_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER, 3);
-    String hostAggregatorDisabledParam = HOST_AGGREGATOR_MINUTE_DISABLED;
-
-    String inputTableName = METRICS_RECORD_TABLE_NAME;
-    String outputTableName = METRICS_AGGREGATE_MINUTE_TABLE_NAME;
-
-    return new TimelineMetricAggregator(hBaseAccessor, metricsConf,
-      checkpointLocation,
-      sleepIntervalMillis,
-      checkpointCutOffMultiplier,
-      hostAggregatorDisabledParam,
-      inputTableName,
-      outputTableName,
-      120000l);
-  }
-
-  public static TimelineMetricAggregator createTimelineMetricAggregatorHourly
-    (PhoenixHBaseAccessor hBaseAccessor, Configuration metricsConf) {
-
-    String checkpointDir = metricsConf.get(
-      TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR, DEFAULT_CHECKPOINT_LOCATION);
-    String checkpointLocation = FilenameUtils.concat(checkpointDir,
-      MINUTE_AGGREGATE_HOURLY_CHECKPOINT_FILE);
-    long sleepIntervalMillis = SECONDS.toMillis(metricsConf.getLong
-      (HOST_AGGREGATOR_HOUR_SLEEP_INTERVAL, 3600l));
-
-    int checkpointCutOffMultiplier = metricsConf.getInt
-      (HOST_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER, 2);
-    String hostAggregatorDisabledParam = HOST_AGGREGATOR_HOUR_DISABLED;
-
-    String inputTableName = METRICS_AGGREGATE_MINUTE_TABLE_NAME;
-    String outputTableName = METRICS_AGGREGATE_HOURLY_TABLE_NAME;
-
-    return new TimelineMetricAggregator(hBaseAccessor, metricsConf,
-      checkpointLocation,
-      sleepIntervalMillis,
-      checkpointCutOffMultiplier,
-      hostAggregatorDisabledParam,
-      inputTableName,
-      outputTableName,
-      3600000l);
-  }
-
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregator.java
deleted file mode 100644
index f595d5e..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregator.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-
-import org.apache.commons.io.FilenameUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
-import java.io.IOException;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import static java.util.concurrent.TimeUnit.SECONDS;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.DefaultCondition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.GET_METRIC_SQL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.NATIVE_TIME_RANGE_DELTA;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_MINUTE_DISABLED;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_MINUTE_SLEEP_INTERVAL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_TIMESLICE_INTERVAL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.DEFAULT_CHECKPOINT_LOCATION;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR;
-
-/**
- * Aggregates a metric across all hosts in the cluster. Reads metrics from
- * the precision table and saves into the aggregate.
- */
-public class TimelineMetricClusterAggregator extends AbstractTimelineAggregator {
-  private static final Log LOG = LogFactory.getLog(TimelineMetricClusterAggregator.class);
-  private static final String CLUSTER_AGGREGATOR_CHECKPOINT_FILE =
-    "timeline-metrics-cluster-aggregator-checkpoint";
-  private final String checkpointLocation;
-  private final Long sleepIntervalMillis;
-  public final int timeSliceIntervalMillis;
-  private final Integer checkpointCutOffMultiplier;
-  private TimelineMetricReader timelineMetricReader =
-    new TimelineMetricReader(true);
-
-  public TimelineMetricClusterAggregator(PhoenixHBaseAccessor hBaseAccessor,
-                                         Configuration metricsConf) {
-    super(hBaseAccessor, metricsConf);
-
-    String checkpointDir = metricsConf.get(
-      TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR, DEFAULT_CHECKPOINT_LOCATION);
-
-    checkpointLocation = FilenameUtils.concat(checkpointDir,
-      CLUSTER_AGGREGATOR_CHECKPOINT_FILE);
-
-    sleepIntervalMillis = SECONDS.toMillis(metricsConf.getLong
-      (CLUSTER_AGGREGATOR_MINUTE_SLEEP_INTERVAL, 120l));
-    timeSliceIntervalMillis = (int)SECONDS.toMillis(metricsConf.getInt
-      (CLUSTER_AGGREGATOR_TIMESLICE_INTERVAL, 15));
-    checkpointCutOffMultiplier =
-      metricsConf.getInt(CLUSTER_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER, 2);
-  }
-
-  @Override
-  protected String getCheckpointLocation() {
-    return checkpointLocation;
-  }
-
-  @Override
-  protected void aggregate(ResultSet rs, long startTime, long endTime)
-    throws SQLException, IOException {
-    List<Long[]> timeSlices = getTimeSlices(startTime, endTime);
-    Map<TimelineClusterMetric, MetricClusterAggregate>
-      aggregateClusterMetrics = aggregateMetricsFromResultSet(rs, timeSlices);
-
-    LOG.info("Saving " + aggregateClusterMetrics.size() + " metric aggregates.");
-    hBaseAccessor.saveClusterAggregateRecords(aggregateClusterMetrics);
-  }
-
-  @Override
-  protected Condition prepareMetricQueryCondition(long startTime, long endTime) {
-    Condition condition = new DefaultCondition(null, null, null, null, startTime,
-      endTime, null, null, true);
-    condition.setNoLimit();
-    condition.setFetchSize(resultsetFetchSize);
-    condition.setStatement(String.format(GET_METRIC_SQL,
-      PhoenixTransactSQL.getNaiveTimeRangeHint(startTime, NATIVE_TIME_RANGE_DELTA),
-      METRICS_RECORD_TABLE_NAME));
-    condition.addOrderByColumn("METRIC_NAME");
-    condition.addOrderByColumn("APP_ID");
-    condition.addOrderByColumn("INSTANCE_ID");
-    condition.addOrderByColumn("SERVER_TIME");
-    return condition;
-  }
-
-  private List<Long[]> getTimeSlices(long startTime, long endTime) {
-    List<Long[]> timeSlices = new ArrayList<Long[]>();
-    long sliceStartTime = startTime;
-    while (sliceStartTime < endTime) {
-      timeSlices.add(new Long[] { sliceStartTime, sliceStartTime + timeSliceIntervalMillis});
-      sliceStartTime += timeSliceIntervalMillis;
-    }
-    return timeSlices;
-  }
-
-  private Map<TimelineClusterMetric, MetricClusterAggregate>
-  aggregateMetricsFromResultSet(ResultSet rs, List<Long[]> timeSlices)
-    throws SQLException, IOException {
-    Map<TimelineClusterMetric, MetricClusterAggregate> aggregateClusterMetrics =
-      new HashMap<TimelineClusterMetric, MetricClusterAggregate>();
-    // Create time slices
-
-    while (rs.next()) {
-      TimelineMetric metric =
-        timelineMetricReader.getTimelineMetricFromResultSet(rs);
-
-      Map<TimelineClusterMetric, Double> clusterMetrics =
-        sliceFromTimelineMetric(metric, timeSlices);
-
-      if (clusterMetrics != null && !clusterMetrics.isEmpty()) {
-        for (Map.Entry<TimelineClusterMetric, Double> clusterMetricEntry :
-            clusterMetrics.entrySet()) {
-          TimelineClusterMetric clusterMetric = clusterMetricEntry.getKey();
-          MetricClusterAggregate aggregate = aggregateClusterMetrics.get(clusterMetric);
-          Double avgValue = clusterMetricEntry.getValue();
-
-          if (aggregate == null) {
-            aggregate = new MetricClusterAggregate(avgValue, 1, null,
-              avgValue, avgValue);
-            aggregateClusterMetrics.put(clusterMetric, aggregate);
-          } else {
-            aggregate.updateSum(avgValue);
-            aggregate.updateNumberOfHosts(1);
-            aggregate.updateMax(avgValue);
-            aggregate.updateMin(avgValue);
-          }
-        }
-      }
-    }
-    return aggregateClusterMetrics;
-  }
-
-  @Override
-  protected Long getSleepIntervalMillis() {
-    return sleepIntervalMillis;
-  }
-
-  @Override
-  protected Integer getCheckpointCutOffMultiplier() {
-    return checkpointCutOffMultiplier;
-  }
-
-  @Override
-  protected boolean isDisabled() {
-    return metricsConf.getBoolean(CLUSTER_AGGREGATOR_MINUTE_DISABLED, false);
-  }
-
-  private Map<TimelineClusterMetric, Double> sliceFromTimelineMetric(
-        TimelineMetric timelineMetric, List<Long[]> timeSlices) {
-
-    if (timelineMetric.getMetricValues().isEmpty()) {
-      return null;
-    }
-
-    Map<TimelineClusterMetric, Double> timelineClusterMetricMap =
-      new HashMap<TimelineClusterMetric, Double>();
-
-    for (Map.Entry<Long, Double> metric : timelineMetric.getMetricValues().entrySet()) {
-      // TODO: investigate null values - pre filter
-      if (metric.getValue() == null) {
-        continue;
-      }
-      Long timestamp = getSliceTimeForMetric(timeSlices,
-                       Long.parseLong(metric.getKey().toString()));
-      if (timestamp != -1) {
-        // Metric is within desired time range
-        TimelineClusterMetric clusterMetric = new TimelineClusterMetric(
-          timelineMetric.getMetricName(),
-          timelineMetric.getAppId(),
-          timelineMetric.getInstanceId(),
-          timestamp,
-          timelineMetric.getType());
-        if (!timelineClusterMetricMap.containsKey(clusterMetric)) {
-          timelineClusterMetricMap.put(clusterMetric, metric.getValue());
-        } else {
-          Double oldValue = timelineClusterMetricMap.get(clusterMetric);
-          Double newValue = (oldValue + metric.getValue()) / 2;
-          timelineClusterMetricMap.put(clusterMetric, newValue);
-        }
-      }
-    }
-
-    return timelineClusterMetricMap;
-  }
-
-  /**
-   * Return beginning of the time slice into which the metric fits.
-   */
-  private Long getSliceTimeForMetric(List<Long[]> timeSlices, Long timestamp) {
-    for (Long[] timeSlice : timeSlices) {
-      if (timestamp >= timeSlice[0] && timestamp < timeSlice[1]) {
-        return timeSlice[0];
-      }
-    }
-    return -1l;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregatorHourly.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregatorHourly.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregatorHourly.java
deleted file mode 100644
index c76ec60..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregatorHourly.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
-  .timeline;
-
-import org.apache.commons.io.FilenameUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import java.io.IOException;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.HashMap;
-import java.util.Map;
-import static java.util.concurrent.TimeUnit.SECONDS;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor.getMetricClusterAggregateFromResultSet;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.DefaultCondition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.GET_CLUSTER_AGGREGATE_SQL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_INTERVAL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_HOUR_DISABLED;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_HOUR_SLEEP_INTERVAL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.DEFAULT_CHECKPOINT_LOCATION;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR;
-
-public class TimelineMetricClusterAggregatorHourly extends
-  AbstractTimelineAggregator {
-  private static final Log LOG = LogFactory.getLog
-    (TimelineMetricClusterAggregatorHourly.class);
-  private static final String CLUSTER_AGGREGATOR_HOURLY_CHECKPOINT_FILE =
-    "timeline-metrics-cluster-aggregator-hourly-checkpoint";
-  private final String checkpointLocation;
-  private final long sleepIntervalMillis;
-  private final Integer checkpointCutOffMultiplier;
-  private long checkpointCutOffIntervalMillis;
-  private static final Long NATIVE_TIME_RANGE_DELTA = 3600000l; // 1 hour
-  private final TimelineClusterMetricReader timelineClusterMetricReader
-     = new TimelineClusterMetricReader(true);
-
-  public TimelineMetricClusterAggregatorHourly(
-    PhoenixHBaseAccessor hBaseAccessor, Configuration metricsConf) {
-    super(hBaseAccessor, metricsConf);
-
-    String checkpointDir = metricsConf.get(
-      TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR, DEFAULT_CHECKPOINT_LOCATION);
-
-    checkpointLocation = FilenameUtils.concat(checkpointDir,
-      CLUSTER_AGGREGATOR_HOURLY_CHECKPOINT_FILE);
-
-    sleepIntervalMillis = SECONDS.toMillis(metricsConf.getLong
-      (CLUSTER_AGGREGATOR_HOUR_SLEEP_INTERVAL, 3600l));
-    checkpointCutOffIntervalMillis =  SECONDS.toMillis(metricsConf.getLong
-      (CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_INTERVAL, 7200l));
-    checkpointCutOffMultiplier = metricsConf.getInt
-      (CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER, 2);
-  }
-
-  @Override
-  protected String getCheckpointLocation() {
-    return checkpointLocation;
-  }
-
-  @Override
-  protected void aggregate(ResultSet rs, long startTime, long endTime)
-    throws SQLException, IOException {
-      Map<TimelineClusterMetric, MetricHostAggregate> hostAggregateMap =
-        aggregateMetricsFromResultSet(rs);
-
-    LOG.info("Saving " + hostAggregateMap.size() + " metric aggregates.");
-    hBaseAccessor.saveClusterAggregateHourlyRecords(hostAggregateMap,
-      METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME);
-  }
-
-  @Override
-  protected Condition prepareMetricQueryCondition(long startTime,
-                                                  long endTime) {
-    Condition condition = new DefaultCondition(null, null, null, null, startTime,
-      endTime, null, null, true);
-    condition.setNoLimit();
-    condition.setFetchSize(resultsetFetchSize);
-    condition.setStatement(String.format(GET_CLUSTER_AGGREGATE_SQL,
-      PhoenixTransactSQL.getNaiveTimeRangeHint(startTime, NATIVE_TIME_RANGE_DELTA),
-        METRICS_CLUSTER_AGGREGATE_TABLE_NAME));
-    condition.addOrderByColumn("METRIC_NAME");
-    condition.addOrderByColumn("APP_ID");
-    condition.addOrderByColumn("INSTANCE_ID");
-    condition.addOrderByColumn("SERVER_TIME");
-    return condition;
-  }
-
-  private Map<TimelineClusterMetric, MetricHostAggregate>
-  aggregateMetricsFromResultSet(ResultSet rs) throws IOException, SQLException {
-
-    TimelineClusterMetric existingMetric = null;
-    MetricHostAggregate hostAggregate = null;
-    Map<TimelineClusterMetric, MetricHostAggregate> hostAggregateMap =
-      new HashMap<TimelineClusterMetric, MetricHostAggregate>();
-
-    while (rs.next()) {
-      TimelineClusterMetric currentMetric =
-        timelineClusterMetricReader.fromResultSet(rs);
-      MetricClusterAggregate currentHostAggregate =
-        getMetricClusterAggregateFromResultSet(rs);
-
-      if (existingMetric == null) {
-        // First row
-        existingMetric = currentMetric;
-        hostAggregate = new MetricHostAggregate();
-        hostAggregateMap.put(currentMetric, hostAggregate);
-      }
-
-      if (existingMetric.equalsExceptTime(currentMetric)) {
-        // Recalculate totals with current metric
-        updateAggregatesFromHost(hostAggregate, currentHostAggregate);
-
-      } else {
-        // Switched over to a new metric - save existing
-        hostAggregate = new MetricHostAggregate();
-        updateAggregatesFromHost(hostAggregate, currentHostAggregate);
-        hostAggregateMap.put(currentMetric, hostAggregate);
-        existingMetric = currentMetric;
-      }
-
-    }
-
-    return hostAggregateMap;
-  }
-
-  private void updateAggregatesFromHost(
-    MetricHostAggregate agg,
-    MetricClusterAggregate currentClusterAggregate) {
-    agg.updateMax(currentClusterAggregate.getMax());
-    agg.updateMin(currentClusterAggregate.getMin());
-    agg.updateSum(currentClusterAggregate.getSum());
-    agg.updateNumberOfSamples(currentClusterAggregate.getNumberOfHosts());
-  }
-
-  @Override
-  protected Long getSleepIntervalMillis() {
-    return sleepIntervalMillis;
-  }
-
-  @Override
-  protected Integer getCheckpointCutOffMultiplier() {
-    return checkpointCutOffMultiplier;
-  }
-
-  @Override
-  protected Long getCheckpointCutOffIntervalMillis() {
-    return checkpointCutOffIntervalMillis;
-  }
-
-  @Override
-  protected boolean isDisabled() {
-    return metricsConf.getBoolean(CLUSTER_AGGREGATOR_HOUR_DISABLED, false);
-  }
-
-
-}
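
Both this removed aggregator and its replacement later in this patch lean on
the same pattern: the query orders rows by the metric key columns and
SERVER_TIME, so a single pass can roll consecutive rows into one aggregate
per key. A minimal standalone sketch of that pattern (class and data are
hypothetical, not part of the patch):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class SortedRollup {
      // Roll up values whose keys arrive pre-sorted: one pass, no lookback.
      public static Map<String, Double> rollup(String[] keys, double[] values) {
        Map<String, Double> sums = new LinkedHashMap<String, Double>();
        String current = null;
        double sum = 0.0;
        for (int i = 0; i < keys.length; i++) {
          if (current != null && !current.equals(keys[i])) {
            sums.put(current, sum);  // key changed: flush the finished group
            sum = 0.0;
          }
          current = keys[i];
          sum += values[i];          // same key: keep accumulating
        }
        if (current != null) {
          sums.put(current, sum);    // flush the last group
        }
        return sums;
      }
    }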

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
index 248894c..b72aa64 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -128,6 +129,11 @@ public class TimelineMetricConfiguration {
   public static final String TIMELINE_SERVICE_RPC_ADDRESS =
     "timeline.metrics.service.rpc.address";
 
+  public static final String CLUSTER_AGGREGATOR_APP_IDS =
+    "timeline.metrics.service.cluster.aggregator.appIds";
+
+  public static final String HOST_APP_ID = "HOST";
+
   private Configuration hbaseConf;
   private Configuration metricsConf;
   private volatile boolean isInitialized = false;
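
The new CLUSTER_AGGREGATOR_APP_IDS key, together with the StringUtils import
added above, suggests a comma-separated list of appIds; the parsing itself is
not part of this hunk. A plausible sketch only (property value hypothetical):

    // Hypothetical: read a comma-separated appId whitelist from metricsConf.
    String appIdsStr = metricsConf.get(CLUSTER_AGGREGATOR_APP_IDS, "");
    List<String> appIdsToAggregate = StringUtils.isBlank(appIdsStr)
      ? Collections.<String>emptyList()
      : Arrays.asList(StringUtils.split(appIdsStr, ','));
    // e.g. "datanode,nodemanager,hbase" -> [datanode, nodemanager, hbase]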

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricReader.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricReader.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricReader.java
deleted file mode 100644
index aa349f0..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricReader.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-
-import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
-
-import java.io.IOException;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.Map;
-import java.util.TreeMap;
-
-public class TimelineMetricReader {
-
-  private boolean ignoreInstance = false;
-
-  public TimelineMetricReader() {}
-
-  public TimelineMetricReader(boolean ignoreInstance) {
-    this.ignoreInstance = ignoreInstance;
-  }
-
-  public TimelineMetric getTimelineMetricFromResultSet(ResultSet rs)
-    throws SQLException, IOException {
-    TimelineMetric metric = getTimelineMetricCommonsFromResultSet(rs);
-    Map<Long, Double> sortedByTimeMetrics = new TreeMap<Long, Double>(
-        PhoenixHBaseAccessor.readMetricFromJSON(rs.getString("METRICS")));
-    metric.setMetricValues(sortedByTimeMetrics);
-    return metric;
-  }
-
-  /**
-   * Returns common part of timeline metrics record without the values.
-   */
-  public TimelineMetric getTimelineMetricCommonsFromResultSet(ResultSet rs)
-    throws SQLException {
-    TimelineMetric metric = new TimelineMetric();
-    metric.setMetricName(rs.getString("METRIC_NAME"));
-    metric.setAppId(rs.getString("APP_ID"));
-    if (!ignoreInstance) metric.setInstanceId(rs.getString("INSTANCE_ID"));
-    metric.setHostName(rs.getString("HOSTNAME"));
-    metric.setTimestamp(rs.getLong("SERVER_TIME"));
-    metric.setStartTime(rs.getLong("START_TIME"));
-    metric.setType(rs.getString("UNITS"));
-    return metric;
-  }
-
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
new file mode 100644
index 0000000..8dea46e
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
@@ -0,0 +1,270 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.SystemClock;
+import java.io.File;
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Date;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.AGGREGATOR_CHECKPOINT_DELAY;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.RESULTSET_FETCH_SIZE;
+
+public abstract class AbstractTimelineAggregator implements Runnable {
+  protected final PhoenixHBaseAccessor hBaseAccessor;
+  private final Log LOG;
+
+  private Clock clock;
+  protected final long checkpointDelayMillis;
+  protected final Integer resultsetFetchSize;
+  protected Configuration metricsConf;
+
+  public AbstractTimelineAggregator(PhoenixHBaseAccessor hBaseAccessor,
+                                    Configuration metricsConf) {
+    this(hBaseAccessor, metricsConf, new SystemClock());
+  }
+
+  public AbstractTimelineAggregator(PhoenixHBaseAccessor hBaseAccessor,
+                                    Configuration metricsConf, Clock clk) {
+    this.hBaseAccessor = hBaseAccessor;
+    this.metricsConf = metricsConf;
+    this.checkpointDelayMillis = SECONDS.toMillis(
+      metricsConf.getInt(AGGREGATOR_CHECKPOINT_DELAY, 120));
+    this.resultsetFetchSize = metricsConf.getInt(RESULTSET_FETCH_SIZE, 2000);
+    this.LOG = LogFactory.getLog(this.getClass());
+    this.clock = clk;
+  }
+
+  @Override
+  public void run() {
+    LOG.info("Started Timeline aggregator thread @ " + new Date());
+    Long SLEEP_INTERVAL = getSleepIntervalMillis();
+
+    while (true) {
+      long sleepTime = runOnce(SLEEP_INTERVAL);
+
+      try {
+        Thread.sleep(sleepTime);
+      } catch (InterruptedException e) {
+        LOG.info("Sleep interrupted, continuing with aggregation.");
+      }
+    }
+  }
+
+  /**
+   * Visibility relaxed for testing.
+   */
+  public long runOnce(Long SLEEP_INTERVAL) {
+    long currentTime = clock.getTime();
+    long lastCheckPointTime = readLastCheckpointSavingOnFirstRun(currentTime);
+    long sleepTime = SLEEP_INTERVAL;
+
+    if (lastCheckPointTime != -1) {
+      LOG.info("Last check point time: " + lastCheckPointTime + ", lagBy: "
+        + ((clock.getTime() - lastCheckPointTime) / 1000)
+        + " seconds.");
+
+      long startTime = clock.getTime();
+      boolean success = doWork(lastCheckPointTime,
+        lastCheckPointTime + SLEEP_INTERVAL);
+      long executionTime = clock.getTime() - startTime;
+      long delta = SLEEP_INTERVAL - executionTime;
+
+      if (delta > 0) {
+        // Sleep for (configured sleep - time to execute task)
+        sleepTime = delta;
+      } else {
+        // No sleep because last run took too long to execute
+        LOG.info("Aggregator execution took too long, " +
+          "cancelling sleep. executionTime = " + executionTime);
+        sleepTime = 1;
+      }
+
+      LOG.debug("Aggregator sleep interval = " + sleepTime);
+
+      if (success) {
+        try {
+          // We cannot blindly save lastCheckPointTime + SLEEP_INTERVAL: the
+          // checkpoint records what has already been aggregated, so it must
+          // never point to a time in the future.
+          saveCheckPoint(Math.min(currentTime, lastCheckPointTime +
+            SLEEP_INTERVAL));
+        } catch (IOException io) {
+          LOG.warn("Error saving checkpoint, restarting aggregation at " +
+            "previous checkpoint.");
+        }
+      }
+    }
+
+    return sleepTime;
+  }
+
+  private long readLastCheckpointSavingOnFirstRun(long currentTime) {
+    long lastCheckPointTime = -1;
+
+    try {
+      lastCheckPointTime = readCheckPoint();
+      if (isLastCheckPointTooOld(lastCheckPointTime)) {
+        LOG.warn("Last Checkpoint is too old, discarding last checkpoint. " +
+          "lastCheckPointTime = " + lastCheckPointTime);
+        lastCheckPointTime = -1;
+      }
+      if (lastCheckPointTime == -1) {
+        // Assume this is the first run: save a checkpoint and sleep.
+        // Set the checkpoint checkpointDelayMillis in the past (2 minutes by
+        // default) to allow the agents/collectors to catch up.
+        LOG.info("Saving checkpoint time on first run: " +
+          (currentTime - checkpointDelayMillis));
+        saveCheckPoint(currentTime - checkpointDelayMillis);
+      }
+    } catch (IOException io) {
+      LOG.warn("Unable to write last checkpoint time. Resuming sleep.", io);
+    }
+    return lastCheckPointTime;
+  }
+
+  private boolean isLastCheckPointTooOld(long checkpoint) {
+    // first checkpoint is saved checkpointDelayMillis in the past,
+    // so here we also need to take it into account
+    return checkpoint != -1 &&
+      ((clock.getTime() - checkpoint - checkpointDelayMillis) >
+        getCheckpointCutOffIntervalMillis());
+  }
+
+  protected long readCheckPoint() {
+    try {
+      File checkpoint = new File(getCheckpointLocation());
+      if (checkpoint.exists()) {
+        String contents = FileUtils.readFileToString(checkpoint);
+        if (contents != null && !contents.isEmpty()) {
+          return Long.parseLong(contents);
+        }
+      }
+    } catch (IOException io) {
+      LOG.debug(io);
+    }
+    return -1;
+  }
+
+  protected void saveCheckPoint(long checkpointTime) throws IOException {
+    File checkpoint = new File(getCheckpointLocation());
+    if (!checkpoint.exists()) {
+      boolean done = checkpoint.createNewFile();
+      if (!done) {
+        throw new IOException("Could not create checkpoint at location: " +
+          getCheckpointLocation());
+      }
+    }
+    FileUtils.writeStringToFile(checkpoint, String.valueOf(checkpointTime));
+  }
+
+  /**
+   * Read metrics written during the time interval and save the aggregates
+   * to the aggregate table.
+   *
+   * @param startTime Sample start time
+   * @param endTime Sample end time
+   */
+  public boolean doWork(long startTime, long endTime) {
+    LOG.info("Start aggregation cycle @ " + new Date() + ", " +
+      "startTime = " + new Date(startTime) + ", endTime = " + new Date(endTime));
+
+    boolean success = true;
+    Condition condition = prepareMetricQueryCondition(startTime, endTime);
+
+    Connection conn = null;
+    PreparedStatement stmt = null;
+    ResultSet rs = null;
+
+    try {
+      conn = hBaseAccessor.getConnection();
+      // Aggregate the interval, ignoring the instance id
+      stmt = PhoenixTransactSQL.prepareGetMetricsSqlStmt(conn, condition);
+
+      LOG.debug("Query issued @: " + new Date());
+      rs = stmt.executeQuery();
+      LOG.debug("Query returned @: " + new Date());
+
+      aggregate(rs, startTime, endTime);
+      LOG.info("End aggregation cycle @ " + new Date());
+
+    } catch (SQLException e) {
+      LOG.error("Exception during aggregating metrics.", e);
+      success = false;
+    } catch (IOException e) {
+      LOG.error("Exception during aggregating metrics.", e);
+      success = false;
+    } finally {
+      if (rs != null) {
+        try {
+          rs.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException sql) {
+          // Ignore
+        }
+      }
+    }
+
+    LOG.info("End aggregation cycle @ " + new Date());
+    return success;
+  }
+
+  protected abstract Condition prepareMetricQueryCondition(long startTime, long endTime);
+
+  protected abstract void aggregate(ResultSet rs, long startTime, long endTime)
+    throws IOException, SQLException;
+
+  protected abstract Long getSleepIntervalMillis();
+
+  protected abstract Integer getCheckpointCutOffMultiplier();
+
+  protected Long getCheckpointCutOffIntervalMillis() {
+    return getCheckpointCutOffMultiplier() * getSleepIntervalMillis();
+  }
+
+  public abstract boolean isDisabled();
+
+  protected abstract String getCheckpointLocation();
+}
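
To make the scheduling contract above concrete: runOnce() subtracts the time
doWork() took from the configured interval and never sleeps less than 1 ms,
and a checkpoint is just an epoch-millis timestamp written to a file. A small
standalone illustration (path and durations hypothetical; assumes java.io.File
and commons-io FileUtils are imported):

    // Checkpoint round trip: write the current time, read it back.
    File checkpoint = new File("/tmp/timeline-metrics-example-checkpoint");
    FileUtils.writeStringToFile(checkpoint, String.valueOf(System.currentTimeMillis()));
    long lastCheckPoint = Long.parseLong(FileUtils.readFileToString(checkpoint));

    // Sleep computation, mirroring the branch in runOnce().
    long sleepInterval = 300000L;             // configured aggregation interval
    long executionTime = 320000L;             // doWork() overran the interval
    long delta = sleepInterval - executionTime;
    long sleepTime = delta > 0 ? delta : 1L;  // floor of 1 ms, never negative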

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AggregatorUtils.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AggregatorUtils.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AggregatorUtils.java
new file mode 100644
index 0000000..ce79b6f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AggregatorUtils.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+
+import java.util.Map;
+
+/**
+ * Static helpers for computing the sum, max, min and sample count of a
+ * series of metric values.
+ */
+public class AggregatorUtils {
+
+  public static double[] calculateAggregates(Map<Long, Double> metricValues) {
+    double[] values = new double[4];
+    double max = Double.MIN_VALUE;
+    double min = Double.MAX_VALUE;
+    double sum = 0.0;
+    int metricCount = 0;
+
+    if (metricValues != null && !metricValues.isEmpty()) {
+      for (Double value : metricValues.values()) {
+        // TODO: Some nulls in data - need to investigate null values from host
+        if (value != null) {
+          if (value > max) {
+            max = value;
+          }
+          if (value < min) {
+            min = value;
+          }
+          sum += value;
+        }
+      }
+      metricCount = metricValues.values().size();
+    }
+    // With no samples, report 0.0 for max/min instead of the sentinel values.
+    values[0] = sum;
+    values[1] = max != Double.MIN_VALUE ? max : 0.0;
+    values[2] = min != Double.MAX_VALUE ? min : 0.0;
+    values[3] = metricCount;
+
+    return values;
+  }
+
+}
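
A quick usage example for the helper above; the returned array is positional:
[sum, max, min, count] (imports of Map/TreeMap assumed):

    Map<Long, Double> values = new TreeMap<Long, Double>();  // timestamp -> value
    values.put(1000L, 1.0);
    values.put(2000L, 3.0);
    values.put(3000L, 2.0);
    double[] agg = AggregatorUtils.calculateAggregates(values);
    // agg[0] == 6.0 (sum), agg[1] == 3.0 (max), agg[2] == 1.0 (min), agg[3] == 3.0 (count)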

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/Function.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/Function.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/Function.java
new file mode 100644
index 0000000..9d17fca
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/Function.java
@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+/**
+ * Represents the read and post-processing functions parsed from a metric
+ * name suffix; used to determine the metrics aggregate table to query.
+ *
+ * @see org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.TimelineWebServices#getTimelineMetric
+ * @see org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.TimelineWebServices#getTimelineMetrics
+ */
+public class Function {
+  public static final Function DEFAULT_VALUE_FUNCTION =
+    new Function(ReadFunction.VALUE, null);
+  private static final String SUFFIX_SEPARATOR = "\\._";
+
+  private ReadFunction readFunction = ReadFunction.VALUE;
+  private PostProcessingFunction postProcessingFunction = null;
+
+  public Function() {
+  }
+
+  public Function(ReadFunction readFunction,
+                  PostProcessingFunction ppFunction) {
+    if (readFunction != null) {
+      this.readFunction = readFunction;
+    }
+    this.postProcessingFunction = ppFunction;
+  }
+
+  public static Function fromMetricName(String metricName) {
+    // Extract the post-processing and aggregation (read) functions from a
+    // suffixed metric name, e.g. "Metric._rate._avg"
+    String[] parts = metricName.split(SUFFIX_SEPARATOR);
+
+    ReadFunction readFunction = ReadFunction.VALUE;
+    PostProcessingFunction ppFunction = null;
+
+    if (parts.length == 3) {
+      ppFunction = PostProcessingFunction.getFunction(parts[1]);
+      readFunction = ReadFunction.getFunction(parts[2]);
+    } else if (parts.length == 2) {
+      readFunction = ReadFunction.getFunction(parts[1]);
+    }
+
+    return new Function(readFunction, ppFunction);
+  }
+
+  public String getSuffix() {
+    return (postProcessingFunction == null) ? readFunction.getSuffix() :
+      postProcessingFunction.getSuffix() + readFunction.getSuffix();
+  }
+
+  public ReadFunction getReadFunction() {
+    return readFunction;
+  }
+
+  @Override
+  public String toString() {
+    return "Function{" +
+      "readFunction=" + readFunction +
+      ", postProcessingFunction=" + postProcessingFunction +
+      '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (!(o instanceof Function)) return false;
+
+    Function function = (Function) o;
+
+    return postProcessingFunction == function.postProcessingFunction
+      && readFunction == function.readFunction;
+
+  }
+
+  @Override
+  public int hashCode() {
+    int result = readFunction.hashCode();
+    result = 31 * result + (postProcessingFunction != null ?
+      postProcessingFunction.hashCode() : 0);
+    return result;
+  }
+
+  public enum PostProcessingFunction {
+    NONE(""),
+    RATE("._rate");
+
+    private final String suffix;
+
+    PostProcessingFunction(String suffix) {
+      this.suffix = suffix;
+    }
+
+    public String getSuffix() {
+      return suffix;
+    }
+
+    public static PostProcessingFunction getFunction(String functionName) throws
+      FunctionFormatException {
+      if (functionName == null) {
+        return NONE;
+      }
+
+      try {
+        return PostProcessingFunction.valueOf(functionName.toUpperCase());
+      } catch (IllegalArgumentException e) {
+        throw new FunctionFormatException(
+          "Post-processing function should be none or rate. Got " + functionName, e);
+      }
+    }
+  }
+
+  public enum ReadFunction {
+    VALUE(""),
+    AVG("._avg"),
+    MIN("._min"),
+    MAX("._max"),
+    SUM("._sum");
+
+    private final String suffix;
+
+    ReadFunction(String suffix){
+      this.suffix = suffix;
+    }
+
+    public String getSuffix() {
+      return suffix;
+    }
+
+    public static ReadFunction getFunction(String functionName) throws
+      FunctionFormatException {
+      if (functionName == null) {
+        return VALUE;
+      }
+      try {
+        return ReadFunction.valueOf(functionName.toUpperCase());
+      } catch (IllegalArgumentException e) {
+        throw new FunctionFormatException(
+          "Function should be value, avg, min, max. Got " + functionName, e);
+      }
+    }
+  }
+
+  public static class FunctionFormatException extends IllegalArgumentException {
+    public FunctionFormatException(String message, Throwable cause) {
+      super(message, cause);
+    }
+  }
+
+}
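
Concretely, the suffix grammar fromMetricName() accepts (metric names here
are illustrative only):

    Function f1 = Function.fromMetricName("cpu_user._rate._avg");
    // post-processing = RATE, read function = AVG
    Function f2 = Function.fromMetricName("cpu_user._max");
    // no post-processing, read function = MAX
    Function f3 = Function.fromMetricName("cpu_user");
    // bare name: defaults to ReadFunction.VALUE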

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java
new file mode 100644
index 0000000..825ac25
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.annotate.JsonSubTypes;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.IOException;
+
+/**
+ * Base class for metric aggregates: tracks a running sum, deviation, max
+ * and min of a metric over an aggregation interval.
+*/
+@JsonSubTypes({@JsonSubTypes.Type(value = MetricClusterAggregate.class),
+  @JsonSubTypes.Type(value = MetricHostAggregate.class)})
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class MetricAggregate {
+  private static final ObjectMapper mapper = new ObjectMapper();
+
+  protected Double sum = 0.0;
+  protected Double deviation;
+  protected Double max = Double.MIN_VALUE;
+  protected Double min = Double.MAX_VALUE;
+
+  public MetricAggregate() {
+  }
+
+  MetricAggregate(Double sum, Double deviation, Double max,
+                  Double min) {
+    this.sum = sum;
+    this.deviation = deviation;
+    this.max = max;
+    this.min = min;
+  }
+
+  public void updateSum(Double sum) {
+    this.sum += sum;
+  }
+
+  public void updateMax(Double max) {
+    if (max > this.max) {
+      this.max = max;
+    }
+  }
+
+  public void updateMin(Double min) {
+    if (min < this.min) {
+      this.min = min;
+    }
+  }
+
+  @JsonProperty("sum")
+  public Double getSum() {
+    return sum;
+  }
+
+  @JsonProperty("deviation")
+  public Double getDeviation() {
+    return deviation;
+  }
+
+  @JsonProperty("max")
+  public Double getMax() {
+    return max;
+  }
+
+  @JsonProperty("min")
+  public Double getMin() {
+    return min;
+  }
+
+  public void setSum(Double sum) {
+    this.sum = sum;
+  }
+
+  public void setDeviation(Double deviation) {
+    this.deviation = deviation;
+  }
+
+  public void setMax(Double max) {
+    this.max = max;
+  }
+
+  public void setMin(Double min) {
+    this.min = min;
+  }
+
+  public String toJSON() throws IOException {
+    return mapper.writeValueAsString(this);
+  }
+}
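
For illustration, how an aggregate serializes via toJSON() (values
hypothetical; field order in the JSON may differ):

    MetricAggregate agg = new MetricAggregate();
    agg.updateSum(4.0);
    agg.updateMax(3.0);
    agg.updateMin(1.0);
    String json = agg.toJSON();
    // e.g. {"sum":4.0,"deviation":null,"max":3.0,"min":1.0}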

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java
new file mode 100644
index 0000000..9c837b6
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+
+import org.codehaus.jackson.annotate.JsonCreator;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+/**
+ * Cluster-level aggregate of a metric: the base sum/deviation/max/min plus
+ * the number of hosts that reported the metric.
+*/
+public class MetricClusterAggregate extends MetricAggregate {
+  private int numberOfHosts;
+
+  @JsonCreator
+  public MetricClusterAggregate() {
+  }
+
+  public MetricClusterAggregate(Double sum, int numberOfHosts, Double deviation,
+                         Double max, Double min) {
+    super(sum, deviation, max, min);
+    this.numberOfHosts = numberOfHosts;
+  }
+
+  @JsonProperty("numberOfHosts")
+  public int getNumberOfHosts() {
+    return numberOfHosts;
+  }
+
+  public void updateNumberOfHosts(int count) {
+    this.numberOfHosts += count;
+  }
+
+  public void setNumberOfHosts(int numberOfHosts) {
+    this.numberOfHosts = numberOfHosts;
+  }
+
+  /**
+   * Merge another aggregate into this one: update max, min, sum and the
+   * number of reporting hosts.
+   */
+  public void updateAggregates(MetricClusterAggregate hostAggregate) {
+    updateMax(hostAggregate.getMax());
+    updateMin(hostAggregate.getMin());
+    updateSum(hostAggregate.getSum());
+    updateNumberOfHosts(hostAggregate.getNumberOfHosts());
+  }
+
+  @Override
+  public String toString() {
+    return "MetricAggregate{" +
+      "sum=" + sum +
+      ", numberOfHosts=" + numberOfHosts +
+      ", deviation=" + deviation +
+      ", max=" + max +
+      ", min=" + min +
+      '}';
+  }
+}
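
A small example of folding per-host readings into one cluster-level record
with updateAggregates() (values hypothetical):

    MetricClusterAggregate clusterAgg =
      new MetricClusterAggregate(2.0, 1, null, 2.0, 2.0);   // host A reported 2.0
    clusterAgg.updateAggregates(
      new MetricClusterAggregate(6.0, 1, null, 6.0, 6.0));  // host B reported 6.0
    // clusterAgg now holds sum=8.0, numberOfHosts=2, max=6.0, min=2.0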

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java
new file mode 100644
index 0000000..340ec75
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+
+import org.codehaus.jackson.annotate.JsonCreator;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+/**
+ * Host-level aggregate of minute-based values, used at resolutions
+ * greater than a minute.
+ */
+public class MetricHostAggregate extends MetricAggregate {
+
+  private long numberOfSamples = 0;
+
+  @JsonCreator
+  public MetricHostAggregate() {
+    super(0.0, 0.0, Double.MIN_VALUE, Double.MAX_VALUE);
+  }
+
+  public MetricHostAggregate(Double sum, int numberOfSamples,
+                             Double deviation,
+                             Double max, Double min) {
+    super(sum, deviation, max, min);
+    this.numberOfSamples = numberOfSamples;
+  }
+
+  @JsonProperty("numberOfSamples")
+  public long getNumberOfSamples() {
+    return numberOfSamples == 0 ? 1 : numberOfSamples;
+  }
+
+  public void updateNumberOfSamples(long count) {
+    this.numberOfSamples += count;
+  }
+
+  public void setNumberOfSamples(long numberOfSamples) {
+    this.numberOfSamples = numberOfSamples;
+  }
+
+  public double getAvg() {
+    // Use getNumberOfSamples() so an empty aggregate cannot divide by zero.
+    return sum / getNumberOfSamples();
+  }
+
+  /**
+   * Merge another aggregate into this one: update max, min, sum and the
+   * number of samples.
+   */
+  public void updateAggregates(MetricHostAggregate hostAggregate) {
+    updateMax(hostAggregate.getMax());
+    updateMin(hostAggregate.getMin());
+    updateSum(hostAggregate.getSum());
+    updateNumberOfSamples(hostAggregate.getNumberOfSamples());
+  }
+
+  @Override
+  public String toString() {
+    return "MetricHostAggregate{" +
+      "sum=" + sum +
+      ", numberOfSamples=" + numberOfSamples +
+      ", deviation=" + deviation +
+      ", max=" + max +
+      ", min=" + min +
+      '}';
+  }
+}
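
The host-level counterpart: merging two minute aggregates into an hourly one
(values hypothetical):

    MetricHostAggregate hourly = new MetricHostAggregate();
    hourly.updateAggregates(new MetricHostAggregate(10.0, 5, 0.0, 4.0, 1.0));
    hourly.updateAggregates(new MetricHostAggregate(20.0, 5, 0.0, 7.0, 2.0));
    // sum=30.0, numberOfSamples=10, max=7.0, min=1.0, getAvg() == 3.0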

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineClusterMetric.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineClusterMetric.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineClusterMetric.java
new file mode 100644
index 0000000..3c30a6f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineClusterMetric.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+public class TimelineClusterMetric {
+  private String metricName;
+  private String appId;
+  private String instanceId;
+  private long timestamp;
+  private String type;
+
+  public TimelineClusterMetric(String metricName, String appId, String instanceId,
+                        long timestamp, String type) {
+    this.metricName = metricName;
+    this.appId = appId;
+    this.instanceId = instanceId;
+    this.timestamp = timestamp;
+    this.type = type;
+  }
+
+  public String getMetricName() {
+    return metricName;
+  }
+
+  public String getAppId() {
+    return appId;
+  }
+
+  public String getInstanceId() {
+    return instanceId;
+  }
+
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  public String getType() { return type; }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+
+    TimelineClusterMetric that = (TimelineClusterMetric) o;
+
+    if (timestamp != that.timestamp) return false;
+    if (appId != null ? !appId.equals(that.appId) : that.appId != null)
+      return false;
+    if (instanceId != null ? !instanceId.equals(that.instanceId) : that.instanceId != null)
+      return false;
+    if (!metricName.equals(that.metricName)) return false;
+
+    return true;
+  }
+
+  public boolean equalsExceptTime(TimelineClusterMetric metric) {
+    if (!metricName.equals(metric.metricName)) return false;
+    if (!appId.equals(metric.appId)) return false;
+    if (instanceId != null ? !instanceId.equals(metric.instanceId) : metric.instanceId != null)
+      return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = metricName.hashCode();
+    result = 31 * result + (appId != null ? appId.hashCode() : 0);
+    result = 31 * result + (instanceId != null ? instanceId.hashCode() : 0);
+    result = 31 * result + (int) (timestamp ^ (timestamp >>> 32));
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    return "TimelineClusterMetric{" +
+      "metricName='" + metricName + '\'' +
+      ", appId='" + appId + '\'' +
+      ", instanceId='" + instanceId + '\'' +
+      ", timestamp=" + timestamp +
+      '}';
+  }
+}
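
equalsExceptTime() is what lets the aggregators' single-pass loops treat
consecutive rows of the same metric/app/instance as one group (values
illustrative):

    TimelineClusterMetric m1 =
      new TimelineClusterMetric("cpu_user", "HOST", null, 1000L, "Percent");
    TimelineClusterMetric m2 =
      new TimelineClusterMetric("cpu_user", "HOST", null, 2000L, "Percent");
    // m1.equals(m2) == false (timestamps differ), m1.equalsExceptTime(m2) == true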

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineClusterMetricReader.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineClusterMetricReader.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineClusterMetricReader.java
new file mode 100644
index 0000000..3df88d2
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineClusterMetricReader.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+public class TimelineClusterMetricReader {
+
+  private boolean ignoreInstance;
+
+  public TimelineClusterMetricReader(boolean ignoreInstance) {
+    this.ignoreInstance = ignoreInstance;
+  }
+
+  public TimelineClusterMetric fromResultSet(ResultSet rs)
+    throws SQLException {
+
+    return new TimelineClusterMetric(
+      rs.getString("METRIC_NAME"),
+      rs.getString("APP_ID"),
+      ignoreInstance ? null : rs.getString("INSTANCE_ID"),
+      rs.getLong("SERVER_TIME"),
+      rs.getString("UNITS"));
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAggregator.java
new file mode 100644
index 0000000..a2887ea
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAggregator.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultCondition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL;
+import java.io.IOException;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.GET_METRIC_AGGREGATE_ONLY_SQL;
+
+public class TimelineMetricAggregator extends AbstractTimelineAggregator {
+  private static final Log LOG = LogFactory.getLog
+    (TimelineMetricAggregator.class);
+
+  private final String checkpointLocation;
+  private final Long sleepIntervalMillis;
+  private final Integer checkpointCutOffMultiplier;
+  private final String hostAggregatorDisabledParam;
+  private final String tableName;
+  private final String outputTableName;
+  private final Long nativeTimeRangeDelay;
+
+  public TimelineMetricAggregator(PhoenixHBaseAccessor hBaseAccessor,
+                                  Configuration metricsConf,
+                                  String checkpointLocation,
+                                  Long sleepIntervalMillis,
+                                  Integer checkpointCutOffMultiplier,
+                                  String hostAggregatorDisabledParam,
+                                  String tableName,
+                                  String outputTableName,
+                                  Long nativeTimeRangeDelay) {
+    super(hBaseAccessor, metricsConf);
+    this.checkpointLocation = checkpointLocation;
+    this.sleepIntervalMillis = sleepIntervalMillis;
+    this.checkpointCutOffMultiplier = checkpointCutOffMultiplier;
+    this.hostAggregatorDisabledParam = hostAggregatorDisabledParam;
+    this.tableName = tableName;
+    this.outputTableName = outputTableName;
+    this.nativeTimeRangeDelay = nativeTimeRangeDelay;
+  }
+
+  @Override
+  protected String getCheckpointLocation() {
+    return checkpointLocation;
+  }
+
+  @Override
+  protected void aggregate(ResultSet rs, long startTime, long endTime)
+    throws IOException, SQLException {
+    Map<TimelineMetric, MetricHostAggregate> hostAggregateMap =
+      aggregateMetricsFromResultSet(rs);
+
+    LOG.info("Saving " + hostAggregateMap.size() + " metric aggregates.");
+    hBaseAccessor.saveHostAggregateRecords(hostAggregateMap,
+      outputTableName);
+  }
+
+  @Override
+  protected Condition prepareMetricQueryCondition(long startTime, long endTime) {
+    Condition condition = new DefaultCondition(null, null, null, null, startTime,
+      endTime, null, null, true);
+    condition.setNoLimit();
+    condition.setFetchSize(resultsetFetchSize);
+    condition.setStatement(String.format(GET_METRIC_AGGREGATE_ONLY_SQL,
+      PhoenixTransactSQL.getNaiveTimeRangeHint(startTime, nativeTimeRangeDelay),
+      tableName));
+    condition.addOrderByColumn("METRIC_NAME");
+    condition.addOrderByColumn("HOSTNAME");
+    condition.addOrderByColumn("APP_ID");
+    condition.addOrderByColumn("INSTANCE_ID");
+    condition.addOrderByColumn("SERVER_TIME");
+    return condition;
+  }
+
+  private Map<TimelineMetric, MetricHostAggregate> aggregateMetricsFromResultSet(
+      ResultSet rs) throws IOException, SQLException {
+    TimelineMetric existingMetric = null;
+    MetricHostAggregate hostAggregate = null;
+    Map<TimelineMetric, MetricHostAggregate> hostAggregateMap =
+      new HashMap<TimelineMetric, MetricHostAggregate>();
+
+    while (rs.next()) {
+      TimelineMetric currentMetric =
+        PhoenixHBaseAccessor.getTimelineMetricKeyFromResultSet(rs);
+      MetricHostAggregate currentHostAggregate =
+        PhoenixHBaseAccessor.getMetricHostAggregateFromResultSet(rs);
+
+      if (existingMetric == null) {
+        // First row
+        existingMetric = currentMetric;
+        hostAggregate = new MetricHostAggregate();
+        hostAggregateMap.put(currentMetric, hostAggregate);
+      }
+
+      if (existingMetric.equalsExceptTime(currentMetric)) {
+        // Recalculate totals with current metric
+        hostAggregate.updateAggregates(currentHostAggregate);
+      } else {
+        // Switched over to a new metric - save existing - create new aggregate
+        hostAggregate = new MetricHostAggregate();
+        hostAggregate.updateAggregates(currentHostAggregate);
+        hostAggregateMap.put(currentMetric, hostAggregate);
+        existingMetric = currentMetric;
+      }
+    }
+    return hostAggregateMap;
+  }
+
+  @Override
+  protected Long getSleepIntervalMillis() {
+    return sleepIntervalMillis;
+  }
+
+  @Override
+  protected Integer getCheckpointCutOffMultiplier() {
+    return checkpointCutOffMultiplier;
+  }
+
+  @Override
+  public boolean isDisabled() {
+    return metricsConf.getBoolean(hostAggregatorDisabledParam, false);
+  }
+}


[5/7] ambari git commit: AMBARI-10290. Expose available host metrics across hostcomponents. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAggregatorFactory.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAggregatorFactory.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAggregatorFactory.java
new file mode 100644
index 0000000..a0e4e32
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAggregatorFactory.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+import org.apache.commons.io.FilenameUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_HOURLY_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.DEFAULT_CHECKPOINT_LOCATION;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_HOUR_DISABLED;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_HOUR_SLEEP_INTERVAL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_MINUTE_DISABLED;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_MINUTE_SLEEP_INTERVAL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR;
+
+public class TimelineMetricAggregatorFactory {
+  private static final String MINUTE_AGGREGATE_CHECKPOINT_FILE =
+    "timeline-metrics-host-aggregator-checkpoint";
+  private static final String MINUTE_AGGREGATE_HOURLY_CHECKPOINT_FILE =
+    "timeline-metrics-host-aggregator-hourly-checkpoint";
+
+  public static TimelineMetricAggregator createTimelineMetricAggregatorMinute
+    (PhoenixHBaseAccessor hBaseAccessor, Configuration metricsConf) {
+
+    String checkpointDir = metricsConf.get(
+      TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR, DEFAULT_CHECKPOINT_LOCATION);
+    String checkpointLocation = FilenameUtils.concat(checkpointDir,
+      MINUTE_AGGREGATE_CHECKPOINT_FILE);
+    long sleepIntervalMillis = SECONDS.toMillis(metricsConf.getLong
+      (HOST_AGGREGATOR_MINUTE_SLEEP_INTERVAL, 300L));  // 5 mins
+
+    int checkpointCutOffMultiplier = metricsConf.getInt
+      (HOST_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER, 3);
+    String hostAggregatorDisabledParam = HOST_AGGREGATOR_MINUTE_DISABLED;
+
+    String inputTableName = METRICS_RECORD_TABLE_NAME;
+    String outputTableName = METRICS_AGGREGATE_MINUTE_TABLE_NAME;
+
+    return new TimelineMetricAggregator(hBaseAccessor, metricsConf,
+      checkpointLocation,
+      sleepIntervalMillis,
+      checkpointCutOffMultiplier,
+      hostAggregatorDisabledParam,
+      inputTableName,
+      outputTableName,
+      120000L);
+  }
+
+  public static TimelineMetricAggregator createTimelineMetricAggregatorHourly
+    (PhoenixHBaseAccessor hBaseAccessor, Configuration metricsConf) {
+
+    String checkpointDir = metricsConf.get(
+      TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR, DEFAULT_CHECKPOINT_LOCATION);
+    String checkpointLocation = FilenameUtils.concat(checkpointDir,
+      MINUTE_AGGREGATE_HOURLY_CHECKPOINT_FILE);
+    long sleepIntervalMillis = SECONDS.toMillis(metricsConf.getLong
+      (HOST_AGGREGATOR_HOUR_SLEEP_INTERVAL, 3600L));
+
+    int checkpointCutOffMultiplier = metricsConf.getInt
+      (HOST_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER, 2);
+    String hostAggregatorDisabledParam = HOST_AGGREGATOR_HOUR_DISABLED;
+
+    String inputTableName = METRICS_AGGREGATE_MINUTE_TABLE_NAME;
+    String outputTableName = METRICS_AGGREGATE_HOURLY_TABLE_NAME;
+
+    return new TimelineMetricAggregator(hBaseAccessor, metricsConf,
+      checkpointLocation,
+      sleepIntervalMillis,
+      checkpointCutOffMultiplier,
+      hostAggregatorDisabledParam,
+      inputTableName,
+      outputTableName,
+      3600000L);
+  }
+
+}

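For context, a minimal wiring sketch (not part of this patch) of how the two
factory methods might be used at service startup. hBaseAccessor and
metricsConf are assumed to be in scope, TimelineMetricAggregator is assumed
to share the isDisabled() contract the cluster aggregators override below,
and running each aggregator on its own thread is an assumption here.

    TimelineMetricAggregator minuteAggregator = TimelineMetricAggregatorFactory
        .createTimelineMetricAggregatorMinute(hBaseAccessor, metricsConf);
    TimelineMetricAggregator hourlyAggregator = TimelineMetricAggregatorFactory
        .createTimelineMetricAggregatorHourly(hBaseAccessor, metricsConf);
    // Skip aggregators disabled via configuration; run the rest on their
    // own threads (assumes the aggregator is a Runnable).
    if (!minuteAggregator.isDisabled()) {
      new Thread(minuteAggregator).start();
    }
    if (!hourlyAggregator.isDisabled()) {
      new Thread(hourlyAggregator).start();
    }
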
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
new file mode 100644
index 0000000..0c8ded2
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_APP_IDS;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_APP_ID;
+
+/**
+ * Aggregator responsible for providing app-level host aggregates. This is
+ * accomplished without a round trip to storage: the
+ * {@link TimelineMetricClusterAggregator} owns the lifecycle of a
+ * TimelineMetricAppAggregator and feeds it the raw data to aggregate.
+ */
+public class TimelineMetricAppAggregator {
+  private static final Log LOG = LogFactory.getLog(TimelineMetricAppAggregator.class);
+  // Lookup to check candidacy of an app
+  private final List<String> appIdsToAggregate;
+  // Map to lookup apps on a host
+  private Map<String, List<String>> hostedAppsMap = new HashMap<String, List<String>>();
+
+  Map<TimelineClusterMetric, MetricClusterAggregate> aggregateClusterMetrics;
+
+  public TimelineMetricAppAggregator(Configuration metricsConf) {
+    appIdsToAggregate = getAppIdsForHostAggregation(metricsConf);
+    LOG.info("AppIds configured for aggregation: " + appIdsToAggregate);
+  }
+
+  /**
+   * Lifecycle method to initialize aggregation cycle.
+   */
+  public void init() {
+    LOG.debug("Initializing aggregation cycle.");
+    aggregateClusterMetrics = new HashMap<TimelineClusterMetric, MetricClusterAggregate>();
+  }
+
+  /**
+   * Lifecycle method to indicate end of aggregation cycle.
+   */
+  public void cleanup() {
+    LOG.debug("Cleanup aggregated data.");
+    aggregateClusterMetrics = null;
+  }
+
+  /**
+   * Useful for resetting apps that no longer need aggregation, without a restart.
+   */
+  public void destroy() {
+    LOG.debug("Cleanup aggregated data as well as in-memory state.");
+    aggregateClusterMetrics = null;
+    hostedAppsMap = new HashMap<String, List<String>>();
+  }
+
+  /**
+   * Calculate aggregates if the clusterMetric is a host metric, for the
+   * apps recorded as hosted on that host.
+   *
+   * @param clusterMetric {@link TimelineClusterMetric} host / app metric
+   * @param hostname Hostname from which this clusterMetric originated.
+   * @param metricValue The metric value for this metric.
+   */
+  public void processTimelineClusterMetric(TimelineClusterMetric clusterMetric,
+                                           String hostname, Double metricValue) {
+
+    String appId = clusterMetric.getAppId();
+    if (appId == null) {
+      return; // No real use case except tests
+    }
+
+    // If metric is a host metric and host has apps on it
+    if (appId.equalsIgnoreCase(HOST_APP_ID)) {
+      // Candidate metric, update app aggregates
+      if (hostedAppsMap.containsKey(hostname)) {
+        updateAppAggregatesFromHostMetric(clusterMetric, hostname, metricValue);
+      }
+    } else {
+      // Not a host metric: build the hosted-apps map, checking app
+      // candidacy for host aggregation
+      if (appIdsToAggregate.contains(appId)) {
+        List<String> appIds = hostedAppsMap.get(hostname);
+        if (appIds == null) {
+          appIds = new ArrayList<String>();
+          hostedAppsMap.put(hostname, appIds);
+        }
+        if (!appIds.contains(appId)) {
+          appIds.add(appId);
+          LOG.info("Adding appId to hosted apps: appId = " +
+            clusterMetric.getAppId() + ", hostname = " + hostname);
+        }
+      }
+    }
+  }
+
+  /**
+   * Build a cluster app metric from a host metric
+   */
+  private void updateAppAggregatesFromHostMetric(TimelineClusterMetric clusterMetric,
+                                                 String hostname, Double metricValue) {
+
+    if (aggregateClusterMetrics == null) {
+      LOG.error("Aggregation requested without init call.");
+      return;
+    }
+
+    List<String> apps = hostedAppsMap.get(hostname);
+    for (String appId : apps) {
+      // Add a new cluster aggregate metric if none exists
+      TimelineClusterMetric appTimelineClusterMetric =
+        new TimelineClusterMetric(clusterMetric.getMetricName(),
+          appId,
+          clusterMetric.getInstanceId(),
+          clusterMetric.getTimestamp(),
+          clusterMetric.getType()
+        );
+
+      MetricClusterAggregate clusterAggregate = aggregateClusterMetrics.get(appTimelineClusterMetric);
+
+      if (clusterAggregate == null) {
+        clusterAggregate = new MetricClusterAggregate(metricValue, 1, null, metricValue, metricValue);
+        aggregateClusterMetrics.put(appTimelineClusterMetric, clusterAggregate);
+      } else {
+        clusterAggregate.updateSum(metricValue);
+        clusterAggregate.updateNumberOfHosts(1);
+        clusterAggregate.updateMax(metricValue);
+        clusterAggregate.updateMin(metricValue);
+      }
+
+    }
+  }
+
+  /**
+   * Return current copy of aggregated data.
+   */
+  public Map<TimelineClusterMetric, MetricClusterAggregate> getAggregateClusterMetrics() {
+    return aggregateClusterMetrics;
+  }
+
+  private List<String> getAppIdsForHostAggregation(Configuration metricsConf) {
+    String appIds = metricsConf.get(CLUSTER_AGGREGATOR_APP_IDS);
+    if (!StringUtils.isEmpty(appIds)) {
+      return Arrays.asList(StringUtils.stripAll(appIds.split(",")));
+    }
+    return Collections.emptyList();
+  }
+}

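A minimal sketch of the lifecycle contract described in the class Javadoc,
assuming metricsConf, clusterMetric and hostname are in scope; the owning
cluster aggregator drives exactly this init / process / read / cleanup
sequence in the next file.

    TimelineMetricAppAggregator appAggregator =
        new TimelineMetricAppAggregator(metricsConf);
    appAggregator.init();  // start an aggregation cycle
    // Feed each sliced metric for the cycle; host metrics fan out to the
    // apps recorded on that host.
    appAggregator.processTimelineClusterMetric(clusterMetric, hostname, 42.0);
    Map<TimelineClusterMetric, MetricClusterAggregate> appAggregates =
        appAggregator.getAggregateClusterMetrics();
    appAggregator.cleanup();  // end of cycle
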
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
new file mode 100644
index 0000000..68b2ba9
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+
+import org.apache.commons.io.FilenameUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultCondition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL;
+import java.io.IOException;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_MINUTE_DISABLED;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_MINUTE_SLEEP_INTERVAL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_TIMESLICE_INTERVAL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.DEFAULT_CHECKPOINT_LOCATION;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.GET_METRIC_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.NATIVE_TIME_RANGE_DELTA;
+
+/**
+ * Aggregates a metric across all hosts in the cluster. Reads metrics from
+ * the precision table and saves into the aggregate.
+ */
+public class TimelineMetricClusterAggregator extends AbstractTimelineAggregator {
+  private static final Log LOG = LogFactory.getLog(TimelineMetricClusterAggregator.class);
+  private static final String CLUSTER_AGGREGATOR_CHECKPOINT_FILE =
+    "timeline-metrics-cluster-aggregator-checkpoint";
+  private final String checkpointLocation;
+  private final Long sleepIntervalMillis;
+  public final int timeSliceIntervalMillis;
+  private final Integer checkpointCutOffMultiplier;
+  private TimelineMetricReadHelper timelineMetricReadHelper = new TimelineMetricReadHelper(true);
+  // Aggregator to perform app-level aggregates for host metrics
+  private final TimelineMetricAppAggregator appAggregator;
+
+  public TimelineMetricClusterAggregator(PhoenixHBaseAccessor hBaseAccessor,
+                                         Configuration metricsConf) {
+    super(hBaseAccessor, metricsConf);
+
+    String checkpointDir = metricsConf.get(
+      TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR, DEFAULT_CHECKPOINT_LOCATION);
+
+    checkpointLocation = FilenameUtils.concat(checkpointDir,
+      CLUSTER_AGGREGATOR_CHECKPOINT_FILE);
+
+    sleepIntervalMillis = SECONDS.toMillis(metricsConf.getLong
+      (CLUSTER_AGGREGATOR_MINUTE_SLEEP_INTERVAL, 120L));
+    timeSliceIntervalMillis = (int)SECONDS.toMillis(metricsConf.getInt
+      (CLUSTER_AGGREGATOR_TIMESLICE_INTERVAL, 15));
+    checkpointCutOffMultiplier =
+      metricsConf.getInt(CLUSTER_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER, 2);
+
+    appAggregator = new TimelineMetricAppAggregator(metricsConf);
+  }
+
+  @Override
+  protected String getCheckpointLocation() {
+    return checkpointLocation;
+  }
+
+  @Override
+  protected void aggregate(ResultSet rs, long startTime, long endTime)
+    throws SQLException, IOException {
+    List<Long[]> timeSlices = getTimeSlices(startTime, endTime);
+    // Initialize app aggregates for host metrics
+    appAggregator.init();
+    Map<TimelineClusterMetric, MetricClusterAggregate>
+      aggregateClusterMetrics = aggregateMetricsFromResultSet(rs, timeSlices);
+
+    LOG.info("Saving " + aggregateClusterMetrics.size() + " metric aggregates.");
+    hBaseAccessor.saveClusterAggregateRecords(aggregateClusterMetrics);
+    appAggregator.cleanup();
+  }
+
+  @Override
+  protected Condition prepareMetricQueryCondition(long startTime, long endTime) {
+    Condition condition = new DefaultCondition(null, null, null, null, startTime,
+      endTime, null, null, true);
+    condition.setNoLimit();
+    condition.setFetchSize(resultsetFetchSize);
+    condition.setStatement(String.format(GET_METRIC_SQL,
+      PhoenixTransactSQL.getNaiveTimeRangeHint(startTime, NATIVE_TIME_RANGE_DELTA),
+      METRICS_RECORD_TABLE_NAME));
+    condition.addOrderByColumn("METRIC_NAME");
+    condition.addOrderByColumn("APP_ID");
+    condition.addOrderByColumn("INSTANCE_ID");
+    condition.addOrderByColumn("SERVER_TIME");
+    return condition;
+  }
+
+  private List<Long[]> getTimeSlices(long startTime, long endTime) {
+    List<Long[]> timeSlices = new ArrayList<Long[]>();
+    long sliceStartTime = startTime;
+    while (sliceStartTime < endTime) {
+      timeSlices.add(new Long[] { sliceStartTime, sliceStartTime + timeSliceIntervalMillis});
+      sliceStartTime += timeSliceIntervalMillis;
+    }
+    return timeSlices;
+  }
+
+  private Map<TimelineClusterMetric, MetricClusterAggregate> aggregateMetricsFromResultSet(ResultSet rs, List<Long[]> timeSlices)
+      throws SQLException, IOException {
+    Map<TimelineClusterMetric, MetricClusterAggregate> aggregateClusterMetrics =
+      new HashMap<TimelineClusterMetric, MetricClusterAggregate>();
+    // Fold each record's datapoints into the pre-computed time slices
+
+    while (rs.next()) {
+      TimelineMetric metric = timelineMetricReadHelper.getTimelineMetricFromResultSet(rs);
+
+      Map<TimelineClusterMetric, Double> clusterMetrics =
+        sliceFromTimelineMetric(metric, timeSlices);
+
+      if (clusterMetrics != null && !clusterMetrics.isEmpty()) {
+        for (Map.Entry<TimelineClusterMetric, Double> clusterMetricEntry :
+            clusterMetrics.entrySet()) {
+
+          TimelineClusterMetric clusterMetric = clusterMetricEntry.getKey();
+          Double avgValue = clusterMetricEntry.getValue();
+
+          MetricClusterAggregate aggregate = aggregateClusterMetrics.get(clusterMetric);
+
+          if (aggregate == null) {
+            aggregate = new MetricClusterAggregate(avgValue, 1, null, avgValue, avgValue);
+            aggregateClusterMetrics.put(clusterMetric, aggregate);
+          } else {
+            aggregate.updateSum(avgValue);
+            aggregate.updateNumberOfHosts(1);
+            aggregate.updateMax(avgValue);
+            aggregate.updateMin(avgValue);
+          }
+          // Update app level aggregates
+          appAggregator.processTimelineClusterMetric(clusterMetric,
+            metric.getHostName(), avgValue);
+        }
+      }
+    }
+    // Add app level aggregates to save
+    aggregateClusterMetrics.putAll(appAggregator.getAggregateClusterMetrics());
+    return aggregateClusterMetrics;
+  }
+
+  @Override
+  protected Long getSleepIntervalMillis() {
+    return sleepIntervalMillis;
+  }
+
+  @Override
+  protected Integer getCheckpointCutOffMultiplier() {
+    return checkpointCutOffMultiplier;
+  }
+
+  @Override
+  public boolean isDisabled() {
+    return metricsConf.getBoolean(CLUSTER_AGGREGATOR_MINUTE_DISABLED, false);
+  }
+
+  private Map<TimelineClusterMetric, Double> sliceFromTimelineMetric(
+        TimelineMetric timelineMetric, List<Long[]> timeSlices) {
+
+    if (timelineMetric.getMetricValues().isEmpty()) {
+      return null;
+    }
+
+    Map<TimelineClusterMetric, Double> timelineClusterMetricMap =
+      new HashMap<TimelineClusterMetric, Double>();
+
+    for (Map.Entry<Long, Double> metric : timelineMetric.getMetricValues().entrySet()) {
+      // TODO: investigate null values - pre filter
+      if (metric.getValue() == null) {
+        continue;
+      }
+      Long timestamp = getSliceTimeForMetric(timeSlices,
+                       Long.parseLong(metric.getKey().toString()));
+      if (timestamp != -1) {
+        // Metric is within desired time range
+        TimelineClusterMetric clusterMetric = new TimelineClusterMetric(
+          timelineMetric.getMetricName(),
+          timelineMetric.getAppId(),
+          timelineMetric.getInstanceId(),
+          timestamp,
+          timelineMetric.getType());
+        if (!timelineClusterMetricMap.containsKey(clusterMetric)) {
+          timelineClusterMetricMap.put(clusterMetric, metric.getValue());
+        } else {
+          Double oldValue = timelineClusterMetricMap.get(clusterMetric);
+          Double newValue = (oldValue + metric.getValue()) / 2;
+          timelineClusterMetricMap.put(clusterMetric, newValue);
+        }
+      }
+    }
+
+    return timelineClusterMetricMap;
+  }
+
+  /**
+   * Return beginning of the time slice into which the metric fits.
+   */
+  private Long getSliceTimeForMetric(List<Long[]> timeSlices, Long timestamp) {
+    for (Long[] timeSlice : timeSlices) {
+      if (timestamp >= timeSlice[0] && timestamp < timeSlice[1]) {
+        return timeSlice[0];
+      }
+    }
+    return -1L;
+  }
+
+}

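To make the slicing arithmetic concrete, a short sketch mirroring
getTimeSlices() and getSliceTimeForMetric() with the default 15-second
slice interval:

    long sliceIntervalMillis = 15000L;  // CLUSTER_AGGREGATOR_TIMESLICE_INTERVAL default
    List<Long[]> slices = new ArrayList<Long[]>();
    for (long t = 0; t < 60000L; t += sliceIntervalMillis) {
      slices.add(new Long[] { t, t + sliceIntervalMillis });
    }
    // slices: [0,15000) [15000,30000) [30000,45000) [45000,60000)
    // A datapoint at t=20500 falls in [15000,30000) and is keyed to 15000.
    // Two datapoints landing in the same slice collapse to their average,
    // (v1 + v2) / 2, as in sliceFromTimelineMetric() above.
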
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorHourly.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorHourly.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorHourly.java
new file mode 100644
index 0000000..264e4e6
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorHourly.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+import org.apache.commons.io.FilenameUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultCondition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL;
+import java.io.IOException;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor.getMetricClusterAggregateFromResultSet;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.GET_CLUSTER_AGGREGATE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_INTERVAL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_HOUR_DISABLED;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_HOUR_SLEEP_INTERVAL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.DEFAULT_CHECKPOINT_LOCATION;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR;
+
+public class TimelineMetricClusterAggregatorHourly extends AbstractTimelineAggregator {
+  private static final Log LOG = LogFactory.getLog
+    (TimelineMetricClusterAggregatorHourly.class);
+  private static final String CLUSTER_AGGREGATOR_HOURLY_CHECKPOINT_FILE =
+    "timeline-metrics-cluster-aggregator-hourly-checkpoint";
+  private final String checkpointLocation;
+  private final long sleepIntervalMillis;
+  private final Integer checkpointCutOffMultiplier;
+  private long checkpointCutOffIntervalMillis;
+  private static final Long NATIVE_TIME_RANGE_DELTA = 3600000L; // 1 hour
+  private final TimelineClusterMetricReader timelineClusterMetricReader
+     = new TimelineClusterMetricReader(true);
+
+  public TimelineMetricClusterAggregatorHourly(
+    PhoenixHBaseAccessor hBaseAccessor, Configuration metricsConf) {
+    super(hBaseAccessor, metricsConf);
+
+    String checkpointDir = metricsConf.get(
+      TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR, DEFAULT_CHECKPOINT_LOCATION);
+
+    checkpointLocation = FilenameUtils.concat(checkpointDir,
+      CLUSTER_AGGREGATOR_HOURLY_CHECKPOINT_FILE);
+
+    sleepIntervalMillis = SECONDS.toMillis(metricsConf.getLong
+      (CLUSTER_AGGREGATOR_HOUR_SLEEP_INTERVAL, 3600L));
+    checkpointCutOffIntervalMillis = SECONDS.toMillis(metricsConf.getLong
+      (CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_INTERVAL, 7200L));
+    checkpointCutOffMultiplier = metricsConf.getInt
+      (CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER, 2);
+  }
+
+  @Override
+  protected String getCheckpointLocation() {
+    return checkpointLocation;
+  }
+
+  @Override
+  protected void aggregate(ResultSet rs, long startTime, long endTime)
+    throws SQLException, IOException {
+    Map<TimelineClusterMetric, MetricHostAggregate> hostAggregateMap =
+      aggregateMetricsFromResultSet(rs);
+
+    LOG.info("Saving " + hostAggregateMap.size() + " metric aggregates.");
+    hBaseAccessor.saveClusterAggregateHourlyRecords(hostAggregateMap,
+      METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME);
+  }
+
+  @Override
+  protected Condition prepareMetricQueryCondition(long startTime,
+                                                  long endTime) {
+    Condition condition = new DefaultCondition(null, null, null, null, startTime,
+      endTime, null, null, true);
+    condition.setNoLimit();
+    condition.setFetchSize(resultsetFetchSize);
+    condition.setStatement(String.format(GET_CLUSTER_AGGREGATE_SQL,
+      PhoenixTransactSQL.getNaiveTimeRangeHint(startTime, NATIVE_TIME_RANGE_DELTA),
+      METRICS_CLUSTER_AGGREGATE_TABLE_NAME));
+    condition.addOrderByColumn("METRIC_NAME");
+    condition.addOrderByColumn("APP_ID");
+    condition.addOrderByColumn("INSTANCE_ID");
+    condition.addOrderByColumn("SERVER_TIME");
+    return condition;
+  }
+
+  private Map<TimelineClusterMetric, MetricHostAggregate> aggregateMetricsFromResultSet(ResultSet rs)
+      throws IOException, SQLException {
+
+    TimelineClusterMetric existingMetric = null;
+    MetricHostAggregate hostAggregate = null;
+    Map<TimelineClusterMetric, MetricHostAggregate> hostAggregateMap =
+      new HashMap<TimelineClusterMetric, MetricHostAggregate>();
+
+    while (rs.next()) {
+      TimelineClusterMetric currentMetric =
+        timelineClusterMetricReader.fromResultSet(rs);
+      MetricClusterAggregate currentHostAggregate =
+        getMetricClusterAggregateFromResultSet(rs);
+
+      if (existingMetric == null) {
+        // First row
+        existingMetric = currentMetric;
+        hostAggregate = new MetricHostAggregate();
+        hostAggregateMap.put(currentMetric, hostAggregate);
+      }
+
+      if (existingMetric.equalsExceptTime(currentMetric)) {
+        // Recalculate totals with current metric
+        updateAggregatesFromHost(hostAggregate, currentHostAggregate);
+
+      } else {
+        // Switched over to a new metric - start a new aggregate
+        hostAggregate = new MetricHostAggregate();
+        updateAggregatesFromHost(hostAggregate, currentHostAggregate);
+        hostAggregateMap.put(currentMetric, hostAggregate);
+        existingMetric = currentMetric;
+      }
+
+    }
+
+    return hostAggregateMap;
+  }
+
+  private void updateAggregatesFromHost(MetricHostAggregate agg, MetricClusterAggregate currentClusterAggregate) {
+    agg.updateMax(currentClusterAggregate.getMax());
+    agg.updateMin(currentClusterAggregate.getMin());
+    agg.updateSum(currentClusterAggregate.getSum());
+    agg.updateNumberOfSamples(currentClusterAggregate.getNumberOfHosts());
+  }
+
+  @Override
+  protected Long getSleepIntervalMillis() {
+    return sleepIntervalMillis;
+  }
+
+  @Override
+  protected Integer getCheckpointCutOffMultiplier() {
+    return checkpointCutOffMultiplier;
+  }
+
+  @Override
+  protected Long getCheckpointCutOffIntervalMillis() {
+    return checkpointCutOffIntervalMillis;
+  }
+
+  @Override
+  public boolean isDisabled() {
+    return metricsConf.getBoolean(CLUSTER_AGGREGATOR_HOUR_DISABLED, false);
+  }
+
+}

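Note that aggregateMetricsFromResultSet() above relies on the ORDER BY
columns set in prepareMetricQueryCondition(): rows for the same metric must
arrive consecutively so equalsExceptTime() can fold them. A hypothetical
illustration (the "Double" type string is an assumption):

    long t0 = 1428000000000L;
    TimelineClusterMetric m1 = new TimelineClusterMetric(
        "cpu_user", "HOST", null, t0, "Double");
    TimelineClusterMetric m2 = new TimelineClusterMetric(
        "cpu_user", "HOST", null, t0 + 120000L, "Double");
    // m1.equalsExceptTime(m2) is true, so the second row's max/min/sum and
    // sample count fold into the MetricHostAggregate keyed by m1.
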
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
new file mode 100644
index 0000000..40a9648
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
+
+
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
+
+import java.io.IOException;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Map;
+import java.util.TreeMap;
+
+public class TimelineMetricReadHelper {
+
+  private boolean ignoreInstance = false;
+
+  public TimelineMetricReadHelper() {}
+
+  public TimelineMetricReadHelper(boolean ignoreInstance) {
+    this.ignoreInstance = ignoreInstance;
+  }
+
+  public TimelineMetric getTimelineMetricFromResultSet(ResultSet rs)
+    throws SQLException, IOException {
+    TimelineMetric metric = getTimelineMetricCommonsFromResultSet(rs);
+    Map<Long, Double> sortedByTimeMetrics = new TreeMap<Long, Double>(
+        PhoenixHBaseAccessor.readMetricFromJSON(rs.getString("METRICS")));
+    metric.setMetricValues(sortedByTimeMetrics);
+    return metric;
+  }
+
+  /**
+   * Returns the common part of a timeline metrics record, without the values.
+   */
+  public TimelineMetric getTimelineMetricCommonsFromResultSet(ResultSet rs)
+    throws SQLException {
+    TimelineMetric metric = new TimelineMetric();
+    metric.setMetricName(rs.getString("METRIC_NAME"));
+    metric.setAppId(rs.getString("APP_ID"));
+    if (!ignoreInstance) {
+      metric.setInstanceId(rs.getString("INSTANCE_ID"));
+    }
+    metric.setHostName(rs.getString("HOSTNAME"));
+    metric.setTimestamp(rs.getLong("SERVER_TIME"));
+    metric.setStartTime(rs.getLong("START_TIME"));
+    metric.setType(rs.getString("UNITS"));
+    return metric;
+  }
+
+}
+

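A short usage sketch, assuming rs is a ResultSet produced by one of the
metric queries in this commit; passing true to the constructor drops
INSTANCE_ID, as the minute cluster aggregator does.

    TimelineMetricReadHelper helper = new TimelineMetricReadHelper(true);
    List<TimelineMetric> metrics = new ArrayList<TimelineMetric>();
    while (rs.next()) {
      // Reads the common columns plus the JSON METRICS blob, sorted by time
      metrics.add(helper.getTimelineMetricFromResultSet(rs));
    }
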
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java
new file mode 100644
index 0000000..b52748f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query;
+
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.Precision;
+
+import java.util.List;
+
+public interface Condition {
+  boolean isEmpty();
+
+  List<String> getMetricNames();
+  boolean isPointInTime();
+  boolean isGrouped();
+  void setStatement(String statement);
+  String getHostname();
+  Precision getPrecision();
+  void setPrecision(Precision precision);
+  String getAppId();
+  String getInstanceId();
+  StringBuilder getConditionClause();
+  String getOrderByClause(boolean asc);
+  String getStatement();
+  Long getStartTime();
+  Long getEndTime();
+  Integer getLimit();
+  Integer getFetchSize();
+  void setFetchSize(Integer fetchSize);
+  void addOrderByColumn(String column);
+  void setNoLimit();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/ConnectionProvider.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/ConnectionProvider.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/ConnectionProvider.java
new file mode 100644
index 0000000..24239a0
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/ConnectionProvider.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query;
+
+
+import java.sql.Connection;
+import java.sql.SQLException;
+
+/**
+ * Provides JDBC connections to the Phoenix-backed metrics store.
+ */
+public interface ConnectionProvider {
+  public Connection getConnection() throws SQLException;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultCondition.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultCondition.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultCondition.java
new file mode 100644
index 0000000..9d6b7df
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultCondition.java
@@ -0,0 +1,258 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.Precision;
+
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+
+public class DefaultCondition implements Condition {
+  List<String> metricNames;
+  String hostname;
+  String appId;
+  String instanceId;
+  Long startTime;
+  Long endTime;
+  Precision precision;
+  Integer limit;
+  boolean grouped;
+  boolean noLimit = false;
+  Integer fetchSize;
+  String statement;
+  Set<String> orderByColumns = new LinkedHashSet<String>();
+
+  public DefaultCondition(List<String> metricNames, String hostname, String appId,
+                   String instanceId, Long startTime, Long endTime, Precision precision,
+                   Integer limit, boolean grouped) {
+    this.metricNames = metricNames;
+    this.hostname = hostname;
+    this.appId = appId;
+    this.instanceId = instanceId;
+    this.startTime = startTime;
+    this.endTime = endTime;
+    this.precision = precision;
+    this.limit = limit;
+    this.grouped = grouped;
+  }
+
+  public String getStatement() {
+    return statement;
+  }
+
+  public void setStatement(String statement) {
+    this.statement = statement;
+  }
+
+  public List<String> getMetricNames() {
+    return metricNames == null || metricNames.isEmpty() ? null : metricNames;
+  }
+
+  public StringBuilder getConditionClause() {
+    StringBuilder sb = new StringBuilder();
+    boolean appendConjunction = false;
+    StringBuilder metricsLike = new StringBuilder();
+    StringBuilder metricsIn = new StringBuilder();
+
+    if (getMetricNames() != null) {
+      for (String name : getMetricNames()) {
+        if (name.contains("%")) {
+          if (metricsLike.length() > 1) {
+            metricsLike.append(" OR ");
+          }
+          metricsLike.append("METRIC_NAME LIKE ?");
+        } else {
+          if (metricsIn.length() > 0) {
+            metricsIn.append(", ");
+          }
+          metricsIn.append("?");
+        }
+      }
+
+      if (metricsIn.length() > 0) {
+        sb.append("(METRIC_NAME IN (");
+        sb.append(metricsIn);
+        sb.append(")");
+        appendConjunction = true;
+      }
+
+      if (metricsLike.length() > 0) {
+        if (appendConjunction) {
+          sb.append(" OR ");
+        } else {
+          sb.append("(");
+        }
+        sb.append(metricsLike);
+        appendConjunction = true;
+      }
+
+      if (appendConjunction) {
+        sb.append(")");
+      }
+    }
+
+    appendConjunction = append(sb, appendConjunction, getHostname(), " HOSTNAME = ?");
+    appendConjunction = append(sb, appendConjunction, getAppId(), " APP_ID = ?");
+    appendConjunction = append(sb, appendConjunction, getInstanceId(), " INSTANCE_ID = ?");
+    appendConjunction = append(sb, appendConjunction, getStartTime(), " SERVER_TIME >= ?");
+    append(sb, appendConjunction, getEndTime(), " SERVER_TIME < ?");
+
+    return sb;
+  }
+
+  protected static boolean append(StringBuilder sb,
+                                  boolean appendConjunction,
+                                  Object value, String str) {
+    if (value != null) {
+      if (appendConjunction) {
+        sb.append(" AND");
+      }
+
+      sb.append(str);
+      appendConjunction = true;
+    }
+    return appendConjunction;
+  }
+
+  public String getHostname() {
+    return hostname == null || hostname.isEmpty() ? null : hostname;
+  }
+
+  public Precision getPrecision() {
+    return precision;
+  }
+
+  public void setPrecision(Precision precision) {
+    this.precision = precision;
+  }
+
+  public String getAppId() {
+    if (appId != null && !appId.isEmpty()) {
+      if (!(appId.equals("HOST") || appId.equals("FLUME_HANDLER"))) {
+        return appId.toLowerCase();
+      } else {
+        return appId;
+      }
+    }
+    return null;
+  }
+
+  public String getInstanceId() {
+    return instanceId == null || instanceId.isEmpty() ? null : instanceId;
+  }
+
+  /**
+   * Convert to millis: values that look like epoch seconds (less than
+   * 10^10) are scaled up; millisecond values pass through unchanged.
+   */
+  public Long getStartTime() {
+    if (startTime == null) {
+      return null;
+    } else if (startTime < 9999999999L) {
+      return startTime * 1000;
+    } else {
+      return startTime;
+    }
+  }
+
+  public Long getEndTime() {
+    if (endTime == null) {
+      return null;
+    }
+    if (endTime < 9999999999L) {
+      return endTime * 1000;
+    } else {
+      return endTime;
+    }
+  }
+
+  public void setNoLimit() {
+    this.noLimit = true;
+  }
+
+  public Integer getLimit() {
+    if (noLimit) {
+      return null;
+    }
+    return limit == null ? PhoenixHBaseAccessor.RESULTSET_LIMIT : limit;
+  }
+
+  public boolean isGrouped() {
+    return grouped;
+  }
+
+  public boolean isPointInTime() {
+    return getStartTime() == null && getEndTime() == null;
+  }
+
+  public boolean isEmpty() {
+    return (metricNames == null || metricNames.isEmpty())
+      && (hostname == null || hostname.isEmpty())
+      && (appId == null || appId.isEmpty())
+      && (instanceId == null || instanceId.isEmpty())
+      && startTime == null
+      && endTime == null;
+  }
+
+  public Integer getFetchSize() {
+    return fetchSize;
+  }
+
+  public void setFetchSize(Integer fetchSize) {
+    this.fetchSize = fetchSize;
+  }
+
+  public void addOrderByColumn(String column) {
+    orderByColumns.add(column);
+  }
+
+  public String getOrderByClause(boolean asc) {
+    String orderByStr = " ORDER BY ";
+    if (!orderByColumns.isEmpty()) {
+      StringBuilder sb = new StringBuilder(orderByStr);
+      for (String orderByColumn : orderByColumns) {
+        if (sb.length() != orderByStr.length()) {
+          sb.append(", ");
+        }
+        sb.append(orderByColumn);
+        if (!asc) {
+          sb.append(" DESC");
+        }
+      }
+      sb.append(" ");
+      return sb.toString();
+    }
+    return null;
+  }
+
+  @Override
+  public String toString() {
+    return "Condition{" +
+      "metricNames=" + metricNames +
+      ", hostname='" + hostname + '\'' +
+      ", appId='" + appId + '\'' +
+      ", instanceId='" + instanceId + '\'' +
+      ", startTime=" + startTime +
+      ", endTime=" + endTime +
+      ", limit=" + limit +
+      ", grouped=" + grouped +
+      ", orderBy=" + orderByColumns +
+      ", noLimit=" + noLimit +
+      '}';
+  }
+}

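A worked example of the clause builder: metric names containing '%' become
LIKE predicates, the rest collapse into a single IN list, and the remaining
non-null fields append AND terms. The timestamps below are already in
millis, so getStartTime()/getEndTime() pass them through unchanged.

    Condition condition = new DefaultCondition(
        Arrays.asList("cpu_user", "mem%"),  // one IN entry, one LIKE entry
        "host1.example.com", "HOST", null,
        1428000000000L, 1428003600000L,     // start / end time in millis
        null, null, false);
    // condition.getConditionClause() yields:
    //   (METRIC_NAME IN (?) OR METRIC_NAME LIKE ?) AND HOSTNAME = ?
    //   AND APP_ID = ? AND SERVER_TIME >= ? AND SERVER_TIME < ?
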
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultPhoenixDataSource.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultPhoenixDataSource.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultPhoenixDataSource.java
new file mode 100644
index 0000000..562049b
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/DefaultPhoenixDataSource.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+
+public class DefaultPhoenixDataSource implements ConnectionProvider {
+
+  static final Log LOG = LogFactory.getLog(DefaultPhoenixDataSource.class);
+  private static final String ZOOKEEPER_CLIENT_PORT =
+    "hbase.zookeeper.property.clientPort";
+  private static final String ZOOKEEPER_QUORUM = "hbase.zookeeper.quorum";
+  private static final String ZNODE_PARENT = "zookeeper.znode.parent";
+
+  private static final String connectionUrl = "jdbc:phoenix:%s:%s:%s";
+  private final String url;
+
+  public DefaultPhoenixDataSource(Configuration hbaseConf) {
+    String zookeeperClientPort = hbaseConf.getTrimmed(ZOOKEEPER_CLIENT_PORT,
+      "2181");
+    String zookeeperQuorum = hbaseConf.getTrimmed(ZOOKEEPER_QUORUM);
+    String znodeParent = hbaseConf.getTrimmed(ZNODE_PARENT, "/hbase");
+    if (zookeeperQuorum == null || zookeeperQuorum.isEmpty()) {
+      throw new IllegalStateException("Unable to find Zookeeper quorum to " +
+        "access HBase store using Phoenix.");
+    }
+
+    url = String.format(connectionUrl,
+      zookeeperQuorum,
+      zookeeperClientPort,
+      znodeParent);
+  }
+
+  /**
+   * Get a JDBC connection to the HBase store. The assumption is that the
+   * HBase configuration is present on the classpath and loaded by the
+   * caller into the Configuration object. Phoenix already caches the
+   * HConnection between the client and the HBase cluster.
+   *
+   * @return {@link java.sql.Connection}
+   */
+  public Connection getConnection() throws SQLException {
+
+    LOG.debug("Metric store connection url: " + url);
+    try {
+      return DriverManager.getConnection(url);
+    } catch (SQLException e) {
+      LOG.warn("Unable to connect to HBase store using Phoenix.", e);
+
+      throw e;
+    }
+  }
+
+}

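A small sketch of the resulting connection URL, assuming the Phoenix JDBC
driver is on the classpath:

    Configuration hbaseConf = new Configuration(false);
    hbaseConf.set("hbase.zookeeper.quorum", "zk1.example.com,zk2.example.com");
    ConnectionProvider provider = new DefaultPhoenixDataSource(hbaseConf);
    // With the default client port and znode parent this connects using:
    //   jdbc:phoenix:zk1.example.com,zk2.example.com:2181:/hbase
    Connection conn = provider.getConnection();
    try {
      // ... execute statements built by PhoenixTransactSQL ...
    } finally {
      conn.close();
    }
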
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/PhoenixTransactSQL.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/PhoenixTransactSQL.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/PhoenixTransactSQL.java
new file mode 100644
index 0000000..636999f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/PhoenixTransactSQL.java
@@ -0,0 +1,573 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.Precision;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Encapsulate all metrics related SQL queries.
+ */
+public class PhoenixTransactSQL {
+
+  public static final Log LOG = LogFactory.getLog(PhoenixTransactSQL.class);
+
+  /**
+   * Create table to store individual metric records.
+   */
+  public static final String CREATE_METRICS_TABLE_SQL = "CREATE TABLE IF NOT " +
+    "EXISTS METRIC_RECORD (METRIC_NAME VARCHAR, " +
+    "HOSTNAME VARCHAR, " +
+    "SERVER_TIME UNSIGNED_LONG NOT NULL, " +
+    "APP_ID VARCHAR, " +
+    "INSTANCE_ID VARCHAR, " +
+    "START_TIME UNSIGNED_LONG, " +
+    "UNITS CHAR(20), " +
+    "METRIC_SUM DOUBLE, " +
+    "METRIC_COUNT UNSIGNED_INT, " +
+    "METRIC_MAX DOUBLE, " +
+    "METRIC_MIN DOUBLE, " +
+    "METRICS VARCHAR CONSTRAINT pk " +
+    "PRIMARY KEY (METRIC_NAME, HOSTNAME, SERVER_TIME, APP_ID, " +
+    "INSTANCE_ID)) DATA_BLOCK_ENCODING='%s', IMMUTABLE_ROWS=true, " +
+    "TTL=%s, COMPRESSION='%s'";
+
+  public static final String CREATE_METRICS_AGGREGATE_HOURLY_TABLE_SQL =
+    "CREATE TABLE IF NOT EXISTS METRIC_RECORD_HOURLY " +
+      "(METRIC_NAME VARCHAR, " +
+      "HOSTNAME VARCHAR, " +
+      "APP_ID VARCHAR, " +
+      "INSTANCE_ID VARCHAR, " +
+      "SERVER_TIME UNSIGNED_LONG NOT NULL, " +
+      "UNITS CHAR(20), " +
+      "METRIC_SUM DOUBLE," +
+      "METRIC_COUNT UNSIGNED_INT, " +
+      "METRIC_MAX DOUBLE," +
+      "METRIC_MIN DOUBLE CONSTRAINT pk " +
+      "PRIMARY KEY (METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, " +
+      "SERVER_TIME)) DATA_BLOCK_ENCODING='%s', IMMUTABLE_ROWS=true, " +
+      "TTL=%s, COMPRESSION='%s'";
+
+  public static final String CREATE_METRICS_AGGREGATE_MINUTE_TABLE_SQL =
+    "CREATE TABLE IF NOT EXISTS METRIC_RECORD_MINUTE " +
+      "(METRIC_NAME VARCHAR, " +
+      "HOSTNAME VARCHAR, " +
+      "APP_ID VARCHAR, " +
+      "INSTANCE_ID VARCHAR, " +
+      "SERVER_TIME UNSIGNED_LONG NOT NULL, " +
+      "UNITS CHAR(20), " +
+      "METRIC_SUM DOUBLE," +
+      "METRIC_COUNT UNSIGNED_INT, " +
+      "METRIC_MAX DOUBLE," +
+      "METRIC_MIN DOUBLE CONSTRAINT pk " +
+      "PRIMARY KEY (METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, " +
+      "SERVER_TIME)) DATA_BLOCK_ENCODING='%s', IMMUTABLE_ROWS=true, TTL=%s," +
+      " COMPRESSION='%s'";
+
+  public static final String CREATE_METRICS_CLUSTER_AGGREGATE_TABLE_SQL =
+    "CREATE TABLE IF NOT EXISTS METRIC_AGGREGATE " +
+      "(METRIC_NAME VARCHAR, " +
+      "APP_ID VARCHAR, " +
+      "INSTANCE_ID VARCHAR, " +
+      "SERVER_TIME UNSIGNED_LONG NOT NULL, " +
+      "UNITS CHAR(20), " +
+      "METRIC_SUM DOUBLE, " +
+      "HOSTS_COUNT UNSIGNED_INT, " +
+      "METRIC_MAX DOUBLE, " +
+      "METRIC_MIN DOUBLE " +
+      "CONSTRAINT pk PRIMARY KEY (METRIC_NAME, APP_ID, INSTANCE_ID, " +
+      "SERVER_TIME)) DATA_BLOCK_ENCODING='%s', IMMUTABLE_ROWS=true, " +
+      "TTL=%s, COMPRESSION='%s'";
+
+  public static final String CREATE_METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_SQL =
+    "CREATE TABLE IF NOT EXISTS METRIC_AGGREGATE_HOURLY " +
+      "(METRIC_NAME VARCHAR, " +
+      "APP_ID VARCHAR, " +
+      "INSTANCE_ID VARCHAR, " +
+      "SERVER_TIME UNSIGNED_LONG NOT NULL, " +
+      "UNITS CHAR(20), " +
+      "METRIC_SUM DOUBLE, " +
+      "METRIC_COUNT UNSIGNED_INT, " +
+      "METRIC_MAX DOUBLE, " +
+      "METRIC_MIN DOUBLE " +
+      "CONSTRAINT pk PRIMARY KEY (METRIC_NAME, APP_ID, INSTANCE_ID, " +
+      "SERVER_TIME)) DATA_BLOCK_ENCODING='%s', IMMUTABLE_ROWS=true, " +
+      "TTL=%s, COMPRESSION='%s'";
+
+  /**
+   * ALTER table to set new options
+   */
+  public static final String ALTER_SQL = "ALTER TABLE %s SET TTL=%s";
+
+  /**
+   * Insert into metric records table.
+   */
+  public static final String UPSERT_METRICS_SQL = "UPSERT INTO %s " +
+    "(METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, SERVER_TIME, START_TIME, " +
+    "UNITS, " +
+    "METRIC_SUM, " +
+    "METRIC_MAX, " +
+    "METRIC_MIN, " +
+    "METRIC_COUNT, " +
+    "METRICS) VALUES " +
+    "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
+
+  public static final String UPSERT_CLUSTER_AGGREGATE_SQL = "UPSERT INTO " +
+    "METRIC_AGGREGATE (METRIC_NAME, APP_ID, INSTANCE_ID, SERVER_TIME, " +
+    "UNITS, " +
+    "METRIC_SUM, " +
+    "HOSTS_COUNT, " +
+    "METRIC_MAX, " +
+    "METRIC_MIN) " +
+    "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)";
+
+  public static final String UPSERT_CLUSTER_AGGREGATE_TIME_SQL = "UPSERT INTO" +
+    " %s (METRIC_NAME, APP_ID, INSTANCE_ID, SERVER_TIME, " +
+    "UNITS, " +
+    "METRIC_SUM, " +
+    "METRIC_COUNT, " +
+    "METRIC_MAX, " +
+    "METRIC_MIN) " +
+    "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)";
+
+  public static final String UPSERT_AGGREGATE_RECORD_SQL = "UPSERT INTO " +
+    "%s (METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, " +
+    "SERVER_TIME, " +
+    "UNITS, " +
+    "METRIC_SUM, " +
+    "METRIC_MAX, " +
+    "METRIC_MIN," +
+    "METRIC_COUNT) " +
+    "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
+
+  /**
+   * Retrieve a set of rows from metrics records table.
+   */
+  public static final String GET_METRIC_SQL = "SELECT %s METRIC_NAME, " +
+    "HOSTNAME, APP_ID, INSTANCE_ID, SERVER_TIME, START_TIME, UNITS, " +
+    "METRIC_SUM, " +
+    "METRIC_MAX, " +
+    "METRIC_MIN, " +
+    "METRIC_COUNT, " +
+    "METRICS " +
+    "FROM %s";
+
+  public static final String GET_METRIC_AGGREGATE_ONLY_SQL = "SELECT %s " +
+    "METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, SERVER_TIME, " +
+    "UNITS, " +
+    "METRIC_SUM, " +
+    "METRIC_MAX, " +
+    "METRIC_MIN, " +
+    "METRIC_COUNT " +
+    "FROM %s";
+
+  public static final String GET_CLUSTER_AGGREGATE_SQL = "SELECT %s " +
+    "METRIC_NAME, APP_ID, " +
+    "INSTANCE_ID, SERVER_TIME, " +
+    "UNITS, " +
+    "METRIC_SUM, " +
+    "HOSTS_COUNT, " +
+    "METRIC_MAX, " +
+    "METRIC_MIN " +
+    "FROM %s";
+
+  public static final String GET_CLUSTER_AGGREGATE_HOURLY_SQL = "SELECT %s " +
+      "METRIC_NAME, APP_ID, " +
+      "INSTANCE_ID, SERVER_TIME, " +
+      "UNITS, " +
+      "METRIC_SUM, " +
+      "METRIC_COUNT, " +
+      "METRIC_MAX, " +
+      "METRIC_MIN " +
+      "FROM %s";
+
+  public static final String METRICS_RECORD_TABLE_NAME = "METRIC_RECORD";
+  public static final String METRICS_AGGREGATE_MINUTE_TABLE_NAME =
+    "METRIC_RECORD_MINUTE";
+  public static final String METRICS_AGGREGATE_HOURLY_TABLE_NAME =
+    "METRIC_RECORD_HOURLY";
+  public static final String METRICS_CLUSTER_AGGREGATE_TABLE_NAME =
+    "METRIC_AGGREGATE";
+  public static final String METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME =
+    "METRIC_AGGREGATE_HOURLY";
+  public static final String DEFAULT_TABLE_COMPRESSION = "SNAPPY";
+  public static final String DEFAULT_ENCODING = "FAST_DIFF";
+  public static final long NATIVE_TIME_RANGE_DELTA = 120000; // 2 minutes
+  public static final long HOUR = 3600000; // 1 hour
+  public static final long DAY = 86400000; // 1 day
+
+  /**
+   * Filter to optimize HBase scan by using file timestamps. This prevents
+   * a full table scan of metric records.
+   *
+   * @return Phoenix Hint String
+   */
+  public static String getNaiveTimeRangeHint(Long startTime, Long delta) {
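+    // e.g. startTime=1428000120000L with the default delta of 120000 yields
+    // "/*+ NATIVE_TIME_RANGE(1428000000000) */"; callers must pass a
+    // non-null startTime.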
+    return String.format("/*+ NATIVE_TIME_RANGE(%s) */", (startTime - delta));
+  }
+
+  public static PreparedStatement prepareGetMetricsSqlStmt(
+    Connection connection, Condition condition) throws SQLException {
+
+    validateConditionIsNotEmpty(condition);
+    validateRowCountLimit(condition);
+
+    String stmtStr;
+    if (condition.getStatement() != null) {
+      stmtStr = condition.getStatement();
+    } else {
+
+      String metricsTable;
+      String query;
+      if (condition.getPrecision() == null) {
+        long endTime = condition.getEndTime() == null ? System.currentTimeMillis() : condition.getEndTime();
+        long startTime = condition.getStartTime() == null ? 0 : condition.getStartTime();
+        Long timeRange = endTime - startTime;
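+        // Pick the coarsest table that still covers the requested range:
+        // over 5 days reads hourly aggregates, over 10 hours reads minute
+        // aggregates, anything shorter reads the raw records table.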
+        if (timeRange > 5 * DAY) {
+          metricsTable = METRICS_AGGREGATE_HOURLY_TABLE_NAME;
+          query = GET_METRIC_AGGREGATE_ONLY_SQL;
+          condition.setPrecision(Precision.HOURS);
+        } else if (timeRange > 10 * HOUR) {
+          metricsTable = METRICS_AGGREGATE_MINUTE_TABLE_NAME;
+          query = GET_METRIC_AGGREGATE_ONLY_SQL;
+          condition.setPrecision(Precision.MINUTES);
+        } else {
+          metricsTable = METRICS_RECORD_TABLE_NAME;
+          query = GET_METRIC_SQL;
+          condition.setPrecision(Precision.SECONDS);
+        }
+      } else {
+        switch (condition.getPrecision()) {
+          case HOURS:
+            metricsTable = METRICS_AGGREGATE_HOURLY_TABLE_NAME;
+            query = GET_METRIC_AGGREGATE_ONLY_SQL;
+            break;
+          case MINUTES:
+            metricsTable = METRICS_AGGREGATE_MINUTE_TABLE_NAME;
+            query = GET_METRIC_AGGREGATE_ONLY_SQL;
+            break;
+          default:
+            metricsTable = METRICS_RECORD_TABLE_NAME;
+            query = GET_METRIC_SQL;
+        }
+      }
+
+      stmtStr = String.format(query,
+        getNaiveTimeRangeHint(condition.getStartTime(), NATIVE_TIME_RANGE_DELTA),
+        metricsTable);
+    }
+
+    StringBuilder sb = new StringBuilder(stmtStr);
+    sb.append(" WHERE ");
+    sb.append(condition.getConditionClause());
+    String orderByClause = condition.getOrderByClause(true);
+
+    if (orderByClause != null) {
+      sb.append(orderByClause);
+    } else {
+      sb.append(" ORDER BY METRIC_NAME, SERVER_TIME ");
+    }
+    if (condition.getLimit() != null) {
+      sb.append(" LIMIT ").append(condition.getLimit());
+    }
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("SQL: " + sb.toString() + ", condition: " + condition);
+    }
+    PreparedStatement stmt = connection.prepareStatement(sb.toString());
+    int pos = 1;
+    if (condition.getMetricNames() != null) {
+      for (; pos <= condition.getMetricNames().size(); pos++) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Setting pos: " + pos + ", value = " + condition.getMetricNames().get(pos - 1));
+        }
+        stmt.setString(pos, condition.getMetricNames().get(pos - 1));
+      }
+    }
+    if (condition.getHostname() != null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting pos: " + pos + ", value: " + condition.getHostname());
+      }
+      stmt.setString(pos++, condition.getHostname());
+    }
+    if (condition.getAppId() != null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting pos: " + pos + ", value: " + condition.getAppId());
+      }
+      stmt.setString(pos++, condition.getAppId());
+    }
+    if (condition.getInstanceId() != null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting pos: " + pos + ", value: " + condition.getInstanceId());
+      }
+      stmt.setString(pos++, condition.getInstanceId());
+    }
+    if (condition.getStartTime() != null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting pos: " + pos + ", value: " + condition.getStartTime());
+      }
+      stmt.setLong(pos++, condition.getStartTime());
+    }
+    if (condition.getEndTime() != null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting pos: " + pos + ", value: " + condition.getEndTime());
+      }
+      stmt.setLong(pos, condition.getEndTime());
+    }
+    if (condition.getFetchSize() != null) {
+      stmt.setFetchSize(condition.getFetchSize());
+    }
+
+    return stmt;
+  }
+
+  private static void validateConditionIsNotEmpty(Condition condition) {
+    if (condition.isEmpty()) {
+      throw new IllegalArgumentException("Condition is empty.");
+    }
+  }
+
+  private static void validateRowCountLimit(Condition condition) {
+    if (condition.getMetricNames() == null
+      || condition.getMetricNames().size() == 0) {
+      // the aggregator can use an empty metrics query
+      return;
+    }
+
+    long range = condition.getEndTime() - condition.getStartTime();
+    // For minutes and seconds precision, size the estimate on minute
+    // resolution; only hourly precision needs the coarser row count.
+    long rowsPerMetric = TimeUnit.MILLISECONDS.toMinutes(range) + 1;
+
+    Precision precision = condition.getPrecision();
+    if (precision != null && precision == Precision.HOURS) {
+      rowsPerMetric = TimeUnit.MILLISECONDS.toHours(range) + 1;
+    }
+
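+    // Illustrative arithmetic: a 7-day range at minute resolution is
+    // 10080 minutes, i.e. 10081 rows per metric, so even a handful of
+    // metrics can exceed the result-set limit and must use aggregate tables.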
+    long totalRowsRequested = rowsPerMetric * condition.getMetricNames().size();
+    if (totalRowsRequested > PhoenixHBaseAccessor.RESULTSET_LIMIT) {
+      throw new IllegalArgumentException("The time range query for " +
+        "precision table exceeds row count limit, please query aggregate " +
+        "table instead.");
+    }
+  }
+
+  public static PreparedStatement prepareGetLatestMetricSqlStmt(
+    Connection connection, Condition condition) throws SQLException {
+
+    validateConditionIsNotEmpty(condition);
+
+    if (condition.getMetricNames() == null
+      || condition.getMetricNames().size() == 0) {
+      throw new IllegalArgumentException("Point in time query without " +
+        "metric names not supported ");
+    }
+
+    String stmtStr;
+    if (condition.getStatement() != null) {
+      stmtStr = condition.getStatement();
+    } else {
+      stmtStr = String.format(GET_METRIC_SQL,
+        "",
+        METRICS_RECORD_TABLE_NAME);
+    }
+
+    StringBuilder sb = new StringBuilder(stmtStr);
+    sb.append(" WHERE ");
+    sb.append(condition.getConditionClause());
+    String orderByClause = condition.getOrderByClause(false);
+    if (orderByClause != null) {
+      sb.append(orderByClause);
+    } else {
+      sb.append(" ORDER BY METRIC_NAME DESC, HOSTNAME DESC, SERVER_TIME DESC ");
+    }
+
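+    // Newest-first ordering with a LIMIT equal to the number of requested
+    // metric names approximates returning the latest row per metric.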
+    sb.append(" LIMIT ").append(condition.getMetricNames().size());
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("SQL: " + sb.toString() + ", condition: " + condition);
+    }
+    PreparedStatement stmt = connection.prepareStatement(sb.toString());
+    int pos = 1;
+    if (condition.getMetricNames() != null) {
+      // Ignore the condition's limit; set one based on the number of metric names
+      for (; pos <= condition.getMetricNames().size(); pos++) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Setting pos: " + pos + ", value = " + condition.getMetricNames().get(pos - 1));
+        }
+        stmt.setString(pos, condition.getMetricNames().get(pos - 1));
+      }
+    }
+    if (condition.getHostname() != null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting pos: " + pos + ", value: " + condition.getHostname());
+      }
+      stmt.setString(pos++, condition.getHostname());
+    }
+    if (condition.getAppId() != null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting pos: " + pos + ", value: " + condition.getAppId());
+      }
+      stmt.setString(pos++, condition.getAppId());
+    }
+    if (condition.getInstanceId() != null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting pos: " + pos + ", value: " + condition.getInstanceId());
+      }
+      stmt.setString(pos++, condition.getInstanceId());
+    }
+
+    if (condition.getFetchSize() != null) {
+      stmt.setFetchSize(condition.getFetchSize());
+    }
+
+    return stmt;
+  }
+
+  public static PreparedStatement prepareGetAggregateSqlStmt(
+    Connection connection, Condition condition) throws SQLException {
+
+    validateConditionIsNotEmpty(condition);
+
+    String metricsAggregateTable;
+    String queryStmt;
+    if (condition.getPrecision() == null) {
+      long endTime = condition.getEndTime() == null ? System.currentTimeMillis() : condition.getEndTime();
+      long startTime = condition.getStartTime() == null ? 0 : condition.getStartTime();
+      Long timeRange = endTime - startTime;
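+      // Cluster-wide queries spanning more than 5 days are served from the
+      // hourly aggregate table; shorter ranges use the finer-grained
+      // METRIC_AGGREGATE table.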
+      if (timeRange > 5 * DAY) {
+        metricsAggregateTable = METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME;
+        queryStmt = GET_CLUSTER_AGGREGATE_HOURLY_SQL;
+        condition.setPrecision(Precision.HOURS);
+      } else {
+        metricsAggregateTable = METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
+        queryStmt = GET_CLUSTER_AGGREGATE_SQL;
+        condition.setPrecision(Precision.SECONDS);
+      }
+    } else {
+      switch (condition.getPrecision()) {
+        case HOURS:
+          metricsAggregateTable = METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME;
+          queryStmt = GET_CLUSTER_AGGREGATE_HOURLY_SQL;
+          break;
+        default:
+          metricsAggregateTable = METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
+          queryStmt = GET_CLUSTER_AGGREGATE_SQL;
+      }
+    }
+
+    StringBuilder sb = new StringBuilder(queryStmt);
+    sb.append(" WHERE ");
+    sb.append(condition.getConditionClause());
+    sb.append(" ORDER BY METRIC_NAME, SERVER_TIME");
+    if (condition.getLimit() != null) {
+      sb.append(" LIMIT ").append(condition.getLimit());
+    }
+
+    String query = String.format(sb.toString(),
+      PhoenixTransactSQL.getNaiveTimeRangeHint(condition.getStartTime(),
+        NATIVE_TIME_RANGE_DELTA), metricsAggregateTable);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("SQL => " + query + ", condition => " + condition);
+    }
+    PreparedStatement stmt = connection.prepareStatement(query);
+    int pos = 1;
+    if (condition.getMetricNames() != null) {
+      for (; pos <= condition.getMetricNames().size(); pos++) {
+        stmt.setString(pos, condition.getMetricNames().get(pos - 1));
+      }
+    }
+    // TODO: Upper case all strings on POST
+    if (condition.getAppId() != null) {
+      stmt.setString(pos++, condition.getAppId());
+    }
+    if (condition.getInstanceId() != null) {
+      stmt.setString(pos++, condition.getInstanceId());
+    }
+    if (condition.getStartTime() != null) {
+      stmt.setLong(pos++, condition.getStartTime());
+    }
+    if (condition.getEndTime() != null) {
+      stmt.setLong(pos, condition.getEndTime());
+    }
+
+    return stmt;
+  }
+
+  public static PreparedStatement prepareGetLatestAggregateMetricSqlStmt(
+    Connection connection, Condition condition) throws SQLException {
+
+    validateConditionIsNotEmpty(condition);
+
+    String stmtStr;
+    if (condition.getStatement() != null) {
+      stmtStr = condition.getStatement();
+    } else {
+      stmtStr = String.format(GET_CLUSTER_AGGREGATE_SQL, "",
+          METRICS_CLUSTER_AGGREGATE_TABLE_NAME);
+    }
+
+    StringBuilder sb = new StringBuilder(stmtStr);
+    sb.append(" WHERE ");
+    sb.append(condition.getConditionClause());
+    String orderByClause = condition.getOrderByClause(false);
+    if (orderByClause != null) {
+      sb.append(orderByClause);
+    } else {
+      sb.append(" ORDER BY METRIC_NAME DESC, SERVER_TIME DESC  ");
+    }
+
+    sb.append(" LIMIT ").append(condition.getMetricNames().size());
+
+    String query = sb.toString();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("SQL: " + query + ", condition: " + condition);
+    }
+
+    PreparedStatement stmt = connection.prepareStatement(query);
+    int pos = 1;
+    if (condition.getMetricNames() != null) {
+      for (; pos <= condition.getMetricNames().size(); pos++) {
+        stmt.setString(pos, condition.getMetricNames().get(pos - 1));
+      }
+    }
+    if (condition.getAppId() != null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting pos: " + pos + ", value: " + condition.getAppId());
+      }
+      stmt.setString(pos++, condition.getAppId());
+    }
+    if (condition.getInstanceId() != null) {
+      stmt.setString(pos++, condition.getInstanceId());
+    }
+
+    return stmt;
+  }
+}
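
A minimal usage sketch for the statement builders above (illustrative only:
the DefaultCondition argument order, the ConnectionProvider wiring, and the
metric/host names are assumptions, not part of this patch):

  import java.sql.Connection;
  import java.sql.PreparedStatement;
  import java.sql.ResultSet;
  import java.sql.SQLException;
  import java.util.Arrays;

  class MetricReadSketch {
    // Reads one example metric for one host; the helper derives precision
    // and the target table from the requested time range.
    static void readMetrics(ConnectionProvider dataSource,
                            long startTime, long endTime) throws SQLException {
      Condition condition = new DefaultCondition(
          Arrays.asList("cpu_user"),   // metric names (example)
          "host1.example.com",         // hostname (example)
          "HOST",                      // appId (example)
          null,                        // instanceId
          startTime, endTime,
          null,                        // precision: derived from the range
          null,                        // limit
          true);                       // grouped
      try (Connection conn = dataSource.getConnection();
           PreparedStatement stmt =
               PhoenixTransactSQL.prepareGetMetricsSqlStmt(conn, condition);
           ResultSet rs = stmt.executeQuery()) {
        while (rs.next()) {
          // one row per (METRIC_NAME, HOSTNAME, SERVER_TIME) record
        }
      }
    }
  }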

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/SplitByMetricNamesCondition.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/SplitByMetricNamesCondition.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/SplitByMetricNamesCondition.java
new file mode 100644
index 0000000..00d6a82
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/SplitByMetricNamesCondition.java
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query;
+
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.Precision;
+
+import java.util.Collections;
+import java.util.List;
+
+public class SplitByMetricNamesCondition implements Condition {
+  private final Condition adaptee;
+  private String currentMetric;
+
+  public SplitByMetricNamesCondition(Condition condition) {
+    this.adaptee = condition;
+  }
+
+  @Override
+  public boolean isEmpty() {
+    return adaptee.isEmpty();
+  }
+
+  @Override
+  public List<String> getMetricNames() {
+    return Collections.singletonList(currentMetric);
+  }
+
+  @Override
+  public boolean isPointInTime() {
+    return adaptee.isPointInTime();
+  }
+
+  @Override
+  public boolean isGrouped() {
+    return adaptee.isGrouped();
+  }
+
+  @Override
+  public void setStatement(String statement) {
+    adaptee.setStatement(statement);
+  }
+
+  @Override
+  public String getHostname() {
+    return adaptee.getHostname();
+  }
+
+  @Override
+  public Precision getPrecision() {
+    return adaptee.getPrecision();
+  }
+
+  @Override
+  public void setPrecision(Precision precision) {
+    adaptee.setPrecision(precision);
+  }
+
+  @Override
+  public String getAppId() {
+    return adaptee.getAppId();
+  }
+
+  @Override
+  public String getInstanceId() {
+    return adaptee.getInstanceId();
+  }
+
+  @Override
+  public StringBuilder getConditionClause() {
+    StringBuilder sb = new StringBuilder();
+    boolean appendConjunction = false;
+
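+    // This adapter exposes exactly one metric name at a time (set via
+    // setCurrentMetric), so the loop below emits a single METRIC_NAME
+    // placeholder per generated query.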
+    if (getMetricNames() != null) {
+      for (String name : getMetricNames()) {
+        if (sb.length() > 1) {
+          sb.append(" OR ");
+        }
+        sb.append("METRIC_NAME = ?");
+      }
+
+      appendConjunction = true;
+    }
+
+    appendConjunction = DefaultCondition.append(sb, appendConjunction,
+      getHostname(), " HOSTNAME = ?");
+    appendConjunction = DefaultCondition.append(sb, appendConjunction,
+      getAppId(), " APP_ID = ?");
+    appendConjunction = DefaultCondition.append(sb, appendConjunction,
+      getInstanceId(), " INSTANCE_ID = ?");
+    appendConjunction = DefaultCondition.append(sb, appendConjunction,
+      getStartTime(), " SERVER_TIME >= ?");
+    DefaultCondition.append(sb, appendConjunction, getEndTime(),
+      " SERVER_TIME < ?");
+
+    return sb;
+  }
+
+  @Override
+  public String getOrderByClause(boolean asc) {
+    return adaptee.getOrderByClause(asc);
+  }
+
+  @Override
+  public String getStatement() {
+    return adaptee.getStatement();
+  }
+
+  @Override
+  public Long getStartTime() {
+    return adaptee.getStartTime();
+  }
+
+  @Override
+  public Long getEndTime() {
+    return adaptee.getEndTime();
+  }
+
+  @Override
+  public Integer getLimit() {
+    return adaptee.getLimit();
+  }
+
+  @Override
+  public Integer getFetchSize() {
+    return adaptee.getFetchSize();
+  }
+
+  @Override
+  public void setFetchSize(Integer fetchSize) {
+    adaptee.setFetchSize(fetchSize);
+  }
+
+  @Override
+  public void addOrderByColumn(String column) {
+    adaptee.addOrderByColumn(column);
+  }
+
+  @Override
+  public void setNoLimit() {
+    adaptee.setNoLimit();
+  }
+
+  public List<String> getOriginalMetricNames() {
+    return adaptee.getMetricNames();
+  }
+
+  public void setCurrentMetric(String currentMetric) {
+    this.currentMetric = currentMetric;
+  }
+}
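
The adapter above lets callers reuse one multi-metric Condition while
issuing a separate query per metric name. A sketch of the intended loop
(the "connection" and "originalCondition" variables are assumed in scope):

  SplitByMetricNamesCondition splitCondition =
      new SplitByMetricNamesCondition(originalCondition);
  for (String metricName : splitCondition.getOriginalMetricNames()) {
    splitCondition.setCurrentMetric(metricName);
    // Each iteration binds "METRIC_NAME = ?" to a single name, so the
    // point-in-time statement returns the latest row for that metric.
    try (PreparedStatement stmt = PhoenixTransactSQL
             .prepareGetLatestMetricSqlStmt(connection, splitCondition);
         ResultSet rs = stmt.executeQuery()) {
      while (rs.next()) {
        // consume the latest record for metricName
      }
    }
  }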

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
index 3720852..e1d256d 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
@@ -24,8 +24,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
-  .timeline.DefaultPhoenixDataSource;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultPhoenixDataSource;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
   .timeline.PhoenixHBaseAccessor;
 import org.apache.zookeeper.ClientCnxn;

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractMiniHBaseClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractMiniHBaseClusterTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractMiniHBaseClusterTest.java
index b11a977..90c03e4 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractMiniHBaseClusterTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractMiniHBaseClusterTest.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.ConnectionProvider;
 import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryServices;
@@ -37,7 +38,7 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Properties;
 
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.LOG;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.LOG;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.assertj.core.api.Assertions.assertThat;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractTimelineAggregatorTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractTimelineAggregatorTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractTimelineAggregatorTest.java
index 969192d..c22e734 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractTimelineAggregatorTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractTimelineAggregatorTest.java
@@ -18,6 +18,9 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.AbstractTimelineAggregator;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL;
 import org.apache.hadoop.yarn.util.Clock;
 import org.junit.Before;
 import org.junit.Test;
@@ -59,7 +62,7 @@ public class AbstractTimelineAggregatorTest {
     agg = new AbstractTimelineAggregator(
       null, metricsConf, clock) {
       @Override
-      protected boolean doWork(long startTime, long endTime) {
+      public boolean doWork(long startTime, long endTime) {
         startTimeInDoWork.set(startTime);
         endTimeInDoWork.set(endTime);
         actualRuns++;
@@ -68,7 +71,7 @@ public class AbstractTimelineAggregatorTest {
       }
 
       @Override
-      protected PhoenixTransactSQL.Condition
+      protected Condition
       prepareMetricQueryCondition(long startTime, long endTime) {
         return null;
       }
@@ -89,7 +92,7 @@ public class AbstractTimelineAggregatorTest {
       }
 
       @Override
-      protected boolean isDisabled() {
+      public boolean isDisabled() {
         return false;
       }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/FunctionTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/FunctionTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/FunctionTest.java
index 2a389ac..af9c6bb 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/FunctionTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/FunctionTest.java
@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
 import org.junit.Test;
 
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.Function.fromMetricName;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.Function.ReadFunction.AVG;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.Function.PostProcessingFunction.RATE;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function.fromMetricName;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function.ReadFunction.AVG;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function.PostProcessingFunction.RATE;
 import static org.assertj.core.api.Assertions.assertThat;
 
 public class FunctionTest {


[2/7] ambari git commit: AMBARI-10290. Expose available host metrics across hostcomponents. Support HostComponent host metrics. (swagle)

Posted by sw...@apache.org.
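
Most of the metrics.json diff below is a mechanical re-indent; the
substantive change is the new "amsHostMetric": true flag on the
HostComponent host-level metrics (boottime, bytes_in/out, load_*, proc_*,
swap_total, part_max_used), which marks them as host metrics that AMS can
serve for the host component, in line with the commit summary.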
http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metrics.json b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metrics.json
index 21db93b..08c2b50 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metrics.json
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metrics.json
@@ -3,109 +3,111 @@
     "Component": [
       {
         "type": "org.apache.ambari.server.controller.metrics.RestMetricsPropertyProvider",
-        "properties" : {
+        "properties": {
           "default_port": "8745",
           "port_config_type": "storm-site",
           "port_property_name": "storm.port",
           "protocol": "http"
         },
-        "metrics": {"default":{
-          "metrics/api/cluster/summary/tasks.total":
-          {
-            "metric": "/api/cluster/summary##tasks.total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/slots.total": {
-            "metric": "/api/cluster/summary##slots.total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/slots.free": {
-            "metric": "/api/cluster/summary##slots.free",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/supervisors": {
-            "metric": "/api/cluster/summary##supervisors",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/executors.total": {
-            "metric": "/api/cluster/summary##executors.total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/slots.used": {
-            "metric": "/api/cluster/summary##slots.used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/topologies": {
-            "metric": "/api/cluster/summary##topologies",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/nimbus.uptime": {
-            "metric": "/api/cluster/summary##nimbus.uptime",
-            "pointInTime": true,
-            "temporal": false
+        "metrics": {
+          "default": {
+            "metrics/api/cluster/summary/tasks.total": {
+              "metric": "/api/cluster/summary##tasks.total",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/slots.total": {
+              "metric": "/api/cluster/summary##slots.total",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/slots.free": {
+              "metric": "/api/cluster/summary##slots.free",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/supervisors": {
+              "metric": "/api/cluster/summary##supervisors",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/executors.total": {
+              "metric": "/api/cluster/summary##executors.total",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/slots.used": {
+              "metric": "/api/cluster/summary##slots.used",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/topologies": {
+              "metric": "/api/cluster/summary##topologies",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/nimbus.uptime": {
+              "metric": "/api/cluster/summary##nimbus.uptime",
+              "pointInTime": true,
+              "temporal": false
+            }
           }
-        }}
+        }
       }
     ],
     "HostComponent": [
       {
         "type": "org.apache.ambari.server.controller.metrics.RestMetricsPropertyProvider",
-        "properties" : {
+        "properties": {
           "default_port": "8745",
           "port_config_type": "storm-site",
           "port_property_name": "storm.port",
           "protocol": "http"
         },
-        "metrics": {"default":{
-          "metrics/api/cluster/summary/tasks.total":
-          {
-            "metric": "/api/cluster/summary##tasks.total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/slots.total": {
-            "metric": "/api/cluster/summary##slots.total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/slots.free": {
-            "metric": "/api/cluster/summary##slots.free",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/supervisors": {
-            "metric": "/api/cluster/summary##supervisors",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/executors.total": {
-            "metric": "/api/cluster/summary##executors.total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/slots.used": {
-            "metric": "/api/cluster/summary##slots.used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/topologies": {
-            "metric": "/api/cluster/summary##topologies",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/api/cluster/summary/nimbus.uptime": {
-            "metric": "/api/cluster/summary##nimbus.uptime",
-            "pointInTime": true,
-            "temporal": false
+        "metrics": {
+          "default": {
+            "metrics/api/cluster/summary/tasks.total": {
+              "metric": "/api/cluster/summary##tasks.total",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/slots.total": {
+              "metric": "/api/cluster/summary##slots.total",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/slots.free": {
+              "metric": "/api/cluster/summary##slots.free",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/supervisors": {
+              "metric": "/api/cluster/summary##supervisors",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/executors.total": {
+              "metric": "/api/cluster/summary##executors.total",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/slots.used": {
+              "metric": "/api/cluster/summary##slots.used",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/topologies": {
+              "metric": "/api/cluster/summary##topologies",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/cluster/summary/nimbus.uptime": {
+              "metric": "/api/cluster/summary##nimbus.uptime",
+              "pointInTime": true,
+              "temporal": false
+            }
           }
-        }}
+        }
       }
     ]
   },
@@ -113,302 +115,320 @@
     "Component": [
       {
         "type": "ganglia",
-        "metrics": {"default":{
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/heap/committed": {
-            "metric": "Nimbus.JVM.Memory.Heap.committed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/heap/init": {
-            "metric": "Nimbus.JVM.Memory.Heap.init",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/heap/max": {
-            "metric": "Nimbus.JVM.Memory.Heap.max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/heap/used": {
-            "metric": "Nimbus.JVM.Memory.Heap.used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/nonheap/committed": {
-            "metric": "Nimbus.JVM.Memory.NonHeap.committed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/nonheap/init": {
-            "metric": "Nimbus.JVM.Memory.NonHeap.init",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/nonheap/max": {
-            "metric": "Nimbus.JVM.Memory.NonHeap.max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/nonheap/used": {
-            "metric": "Nimbus.JVM.Memory.NonHeap.used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/os/processcputime": {
-            "metric": "Nimbus.JVM.OS.ProcessCpuTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/threading/daemonthreadcount": {
-            "metric": "Nimbus.JVM.Threading.DaemonThreadCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/threading/threadcount": {
-            "metric": "Nimbus.JVM.Threading.ThreadCount",
-            "pointInTime": true,
-            "temporal": true
-          },
+        "metrics": {
+          "default": {
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/committed": {
+              "metric": "Nimbus.JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/init": {
+              "metric": "Nimbus.JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/max": {
+              "metric": "Nimbus.JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/used": {
+              "metric": "Nimbus.JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/committed": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/init": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/max": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/used": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/os/processcputime": {
+              "metric": "Nimbus.JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/threading/daemonthreadcount": {
+              "metric": "Nimbus.JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/threading/threadcount": {
+              "metric": "Nimbus.JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
 
-          "metrics/storm/nimbus/freeslots": {
-            "metric": "Free Slots",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/supervisors": {
-            "metric": "Supervisors",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/topologies": {
-            "metric": "Topologies",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/totalexecutors": {
-            "metric": "Total Executors",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/totalslots": {
-            "metric": "Total Slots",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/totaltasks": {
-            "metric": "Total Tasks",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/usedslots": {
-            "metric": "Used Slots",
-            "pointInTime": true,
-            "temporal": true
+            "metrics/storm/nimbus/freeslots": {
+              "metric": "Free Slots",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/supervisors": {
+              "metric": "Supervisors",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/topologies": {
+              "metric": "Topologies",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/totalexecutors": {
+              "metric": "Total Executors",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/totalslots": {
+              "metric": "Total Slots",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/totaltasks": {
+              "metric": "Total Tasks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/usedslots": {
+              "metric": "Used Slots",
+              "pointInTime": true,
+              "temporal": true
+            }
           }
-        }}
+        }
       }
     ],
     "HostComponent": [
       {
         "type": "ganglia",
-        "metrics": {"default":{
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/heap/committed": {
-            "metric": "Nimbus.JVM.Memory.Heap.committed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/heap/init": {
-            "metric": "Nimbus.JVM.Memory.Heap.init",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/heap/max": {
-            "metric": "Nimbus.JVM.Memory.Heap.max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/heap/used": {
-            "metric": "Nimbus.JVM.Memory.Heap.used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/nonheap/committed": {
-            "metric": "Nimbus.JVM.Memory.NonHeap.committed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/nonheap/init": {
-            "metric": "Nimbus.JVM.Memory.NonHeap.init",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/nonheap/max": {
-            "metric": "Nimbus.JVM.Memory.NonHeap.max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/memory/nonheap/used": {
-            "metric": "Nimbus.JVM.Memory.NonHeap.used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/os/processcputime": {
-            "metric": "Nimbus.JVM.OS.ProcessCpuTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/threading/daemonthreadcount": {
-            "metric": "Nimbus.JVM.Threading.DaemonThreadCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/nimbus/jvm/threading/threadcount": {
-            "metric": "Nimbus.JVM.Threading.ThreadCount",
-            "pointInTime": true,
-            "temporal": true
-          }
+        "metrics": {
+          "default": {
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/committed": {
+              "metric": "Nimbus.JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/init": {
+              "metric": "Nimbus.JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/max": {
+              "metric": "Nimbus.JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/used": {
+              "metric": "Nimbus.JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/committed": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/init": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/max": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/used": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/os/processcputime": {
+              "metric": "Nimbus.JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/threading/daemonthreadcount": {
+              "metric": "Nimbus.JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/threading/threadcount": {
+              "metric": "Nimbus.JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            }
 
-        }}
+          }
+        }
       }
     ]
   },
@@ -416,375 +436,393 @@
     "Component": [
       {
         "type": "ganglia",
-        "metrics": {"default":{
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/heap/committed": {
-            "metric": "Supervisor.JVM.Memory.Heap.committed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/heap/init": {
-            "metric": "Supervisor.JVM.Memory.Heap.init",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/heap/max": {
-            "metric": "Supervisor.JVM.Memory.Heap.max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/heap/used": {
-            "metric": "Supervisor.JVM.Memory.Heap.used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/nonheap/committed": {
-            "metric": "Supervisor.JVM.Memory.NonHeap.committed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/nonheap/init": {
-            "metric": "Supervisor.JVM.Memory.NonHeap.init",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/nonheap/max": {
-            "metric": "Supervisor.JVM.Memory.NonHeap.max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/nonheap/used": {
-            "metric": "Supervisor.JVM.Memory.NonHeap.used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/os/processcputime": {
-            "metric": "Supervisor.JVM.OS.ProcessCpuTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/threading/daemonthreadcount": {
-            "metric": "Supervisor.JVM.Threading.DaemonThreadCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/threading/threadcount": {
-            "metric": "Supervisor.JVM.Threading.ThreadCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/heap/committed": {
-            "metric": "Worker.(.+).JVM.Memory.Heap.committed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/heap/init": {
-            "metric": "Worker.(.+).JVM.Memory.Heap.init",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/heap/max": {
-            "metric": "Worker.(.+).JVM.Memory.Heap.max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/heap/used": {
-            "metric": "Worker.(.+).JVM.Memory.Heap.used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/nonheap/committed": {
-            "metric": "Worker.(.+).JVM.Memory.NonHeap.committed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/nonheap/init": {
-            "metric": "Worker.(.+).JVM.Memory.NonHeap.init",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/nonheap/max": {
-            "metric": "Worker.(.+).JVM.Memory.NonHeap.max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/nonheap/used": {
-            "metric": "Worker.(.+).JVM.Memory.NonHeap.used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/os/processcputime": {
-            "metric": "Worker.(.+).JVM.OS.ProcessCpuTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/threading/daemonthreadcount": {
-            "metric": "Worker.(.+).JVM.Threading.DaemonThreadCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/threading/threadcount": {
-            "metric": "Worker.(.+).JVM.Threading.ThreadCount",
-            "pointInTime": true,
-            "temporal": true
+        "metrics": {
+          "default": {
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/committed": {
+              "metric": "Supervisor.JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/init": {
+              "metric": "Supervisor.JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/max": {
+              "metric": "Supervisor.JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/used": {
+              "metric": "Supervisor.JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/committed": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/init": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/max": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/used": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/os/processcputime": {
+              "metric": "Supervisor.JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/threading/daemonthreadcount": {
+              "metric": "Supervisor.JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/threading/threadcount": {
+              "metric": "Supervisor.JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/committed": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/init": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/max": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/used": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/committed": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/init": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/max": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/used": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/os/processcputime": {
+              "metric": "Worker.(.+).JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/threading/daemonthreadcount": {
+              "metric": "Worker.(.+).JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/threading/threadcount": {
+              "metric": "Worker.(.+).JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            }
           }
-        }}
+        }
       }
     ],
     "HostComponent": [
       {
         "type": "ganglia",
-        "metrics": {"default":{
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/heap/committed": {
-            "metric": "Supervisor.JVM.Memory.Heap.committed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/heap/init": {
-            "metric": "Supervisor.JVM.Memory.Heap.init",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/heap/max": {
-            "metric": "Supervisor.JVM.Memory.Heap.max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/heap/used": {
-            "metric": "Supervisor.JVM.Memory.Heap.used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/nonheap/committed": {
-            "metric": "Supervisor.JVM.Memory.NonHeap.committed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/nonheap/init": {
-            "metric": "Supervisor.JVM.Memory.NonHeap.init",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/nonheap/max": {
-            "metric": "Supervisor.JVM.Memory.NonHeap.max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/memory/nonheap/used": {
-            "metric": "Supervisor.JVM.Memory.NonHeap.used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/os/processcputime": {
-            "metric": "Supervisor.JVM.OS.ProcessCpuTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/threading/daemonthreadcount": {
-            "metric": "Supervisor.JVM.Threading.DaemonThreadCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/supervisor/jvm/threading/threadcount": {
-            "metric": "Supervisor.JVM.Threading.ThreadCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/heap/committed": {
-            "metric": "Worker.(.+).JVM.Memory.Heap.committed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/heap/init": {
-            "metric": "Worker.(.+).JVM.Memory.Heap.init",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/heap/max": {
-            "metric": "Worker.(.+).JVM.Memory.Heap.max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/heap/used": {
-            "metric": "Worker.(.+).JVM.Memory.Heap.used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/nonheap/committed": {
-            "metric": "Worker.(.+).JVM.Memory.NonHeap.committed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/nonheap/init": {
-            "metric": "Worker.(.+).JVM.Memory.NonHeap.init",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/nonheap/max": {
-            "metric": "Worker.(.+).JVM.Memory.NonHeap.max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/memory/nonheap/used": {
-            "metric": "Worker.(.+).JVM.Memory.NonHeap.used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/os/processcputime": {
-            "metric": "Worker.(.+).JVM.OS.ProcessCpuTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/threading/daemonthreadcount": {
-            "metric": "Worker.(.+).JVM.Threading.DaemonThreadCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/storm/worker/$1/jvm/threading/threadcount": {
-            "metric": "Worker.(.+).JVM.Threading.ThreadCount",
-            "pointInTime": true,
-            "temporal": true
+        "metrics": {
+          "default": {
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/committed": {
+              "metric": "Supervisor.JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/init": {
+              "metric": "Supervisor.JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/max": {
+              "metric": "Supervisor.JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/used": {
+              "metric": "Supervisor.JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/committed": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/init": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/max": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/used": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/os/processcputime": {
+              "metric": "Supervisor.JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/threading/daemonthreadcount": {
+              "metric": "Supervisor.JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/threading/threadcount": {
+              "metric": "Supervisor.JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/committed": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/init": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/max": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/used": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/committed": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/init": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/max": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/used": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/os/processcputime": {
+              "metric": "Worker.(.+).JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/threading/daemonthreadcount": {
+              "metric": "Worker.(.+).JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/threading/threadcount": {
+              "metric": "Worker.(.+).JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            }
           }
-        }}
+        }
       }
     ]
   }

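The only schema change in the metric definitions above is the optional "amsHostMetric" key sitting next to the existing "metric", "pointInTime", and "temporal" keys, so the backing bean needs just one extra boolean. The sketch below is a minimal illustration, not the actual org.apache.ambari.server.state.stack.Metric source; the getter name matches the isAmsHostMetric() calls in the test change at the end of this patch, while the constructor shape is an assumption.

    // Minimal sketch of a metric-definition bean for entries like the ones
    // above; not the actual org.apache.ambari.server.state.stack.Metric source.
    public class Metric {
      private String metric;         // source metric name, e.g. "swap_total"
      private boolean pointInTime;   // current-value reads supported
      private boolean temporal;      // time-series reads supported
      private boolean amsHostMetric; // host-level metric available via AMS

      public Metric() {
      }

      public Metric(String metric, boolean pointInTime, boolean temporal,
                    boolean amsHostMetric) {
        this.metric = metric;
        this.pointInTime = pointInTime;
        this.temporal = temporal;
        this.amsHostMetric = amsHostMetric;
      }

      public String getMetric() {
        return metric;
      }

      public boolean isPointInTime() {
        return pointInTime;
      }

      public boolean isTemporal() {
        return temporal;
      }

      public boolean isAmsHostMetric() {
        return amsHostMetric;
      }
    }
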
http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metrics.json b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metrics.json
index c719141..ce71a8c 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metrics.json
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metrics.json
@@ -5,6 +5,153 @@
         "type": "ganglia",
         "metrics": {
           "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/process/proc_total":{
+              "metric":"proc_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_count":{
+              "metric":"read_count",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_count":{
+              "metric":"write_count",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_bytes":{
+              "metric":"read_bytes",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_bytes":{
+              "metric":"write_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_time":{
+              "metric":"read_time",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_time":{
+              "metric":"write_time",
+              "pointInTime":true,
+              "temporal":true
+            },
             "metrics/jvm/memHeapCommittedM": {
               "metric": "jvm.JvmMetrics.MemHeapCommittedM",
               "pointInTime": false,
@@ -75,16 +222,6 @@
               "pointInTime": false,
               "temporal": true
             },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/ReceivedBytes": {
               "metric": "rpc.rpc.ReceivedBytes",
               "pointInTime": false,
@@ -100,11 +237,6 @@
               "pointInTime": false,
               "temporal": true
             },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/yarn/AllocatedGB": {
               "metric": "yarn.NodeManagerMetrics.AllocatedGB",
               "pointInTime": true,
@@ -140,11 +272,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/ugi/loginSuccess_num_ops": {
               "metric": "ugi.ugi.LoginSuccessNumOps",
               "pointInTime": false,
@@ -165,31 +292,16 @@
               "pointInTime": false,
               "temporal": true
             },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/cpu/cpu_aidle": {
               "metric": "cpu_aidle",
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/boottime": {
               "metric": "boottime",
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/jvm/memNonHeapCommittedM": {
               "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
               "pointInTime": false,
@@ -279,11 +391,6 @@
               "metric": "rpc.metrics.RpcAuthenticationFailures",
               "pointInTime": false,
               "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
             }
           }
         }
@@ -293,6 +400,180 @@
       {
         "type": "ganglia",
         "metrics": {
+          "metrics/cpu/cpu_idle":{
+            "metric":"cpu_idle",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/cpu/cpu_nice":{
+            "metric":"cpu_nice",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/cpu/cpu_system":{
+            "metric":"cpu_system",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/cpu/cpu_user":{
+            "metric":"cpu_user",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/cpu/cpu_wio":{
+            "metric":"cpu_wio",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/disk/disk_free":{
+            "metric":"disk_free",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/disk/disk_total":{
+            "metric":"disk_total",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/load/load_fifteen":{
+            "metric":"load_fifteen",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/load/load_five":{
+            "metric":"load_five",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/load/load_one":{
+            "metric":"load_one",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/memory/mem_buffers":{
+            "metric":"mem_buffers",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/memory/mem_cached":{
+            "metric":"mem_cached",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/memory/mem_free":{
+            "metric":"mem_free",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/memory/mem_shared":{
+            "metric":"mem_shared",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/memory/mem_total":{
+            "metric":"mem_total",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/memory/swap_free":{
+            "metric":"swap_free",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/memory/swap_total":{
+            "metric":"swap_total",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/network/bytes_in":{
+            "metric":"bytes_in",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/network/bytes_out":{
+            "metric":"bytes_out",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/network/pkts_in":{
+            "metric":"pkts_in",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/network/pkts_out":{
+            "metric":"pkts_out",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/process/proc_run":{
+            "metric":"proc_run",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/process/proc_total":{
+            "metric":"proc_total",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/disk/read_count":{
+            "metric":"read_count",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/disk/write_count":{
+            "metric":"write_count",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/disk/read_bytes":{
+            "metric":"read_bytes",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/disk/write_bytes":{
+            "metric":"write_bytes",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/disk/read_time":{
+            "metric":"read_time",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
+          "metrics/disk/write_time":{
+            "metric":"write_time",
+            "pointInTime":true,
+            "temporal":true,
+            "amsHostMetric":true
+          },
           "default": {
             "metrics/jvm/memHeapCommittedM": {
               "metric": "jvm.JvmMetrics.MemHeapCommittedM",

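Once the flag is parsed, callers can partition a definition map into host-level and component-level entries, which is what allows any host component on a host to expose that host's system metrics. A rough sketch follows, assuming the hypothetical Metric bean sketched earlier and a Map<String, Metric> shaped like the one MetricDefinition.getMetrics() returns in the test further down; this is not the actual provider code.

    import java.util.HashMap;
    import java.util.Map;

    // Sketch only: keep just the entries flagged as AMS host metrics so they
    // can be served for any host component running on the host.
    public class HostMetricFilter {
      public static Map<String, Metric> hostMetricsOnly(Map<String, Metric> all) {
        Map<String, Metric> result = new HashMap<String, Metric>();
        for (Map.Entry<String, Metric> entry : all.entrySet()) {
          if (entry.getValue().isAmsHostMetric()) {
            result.put(entry.getKey(), entry.getValue());
          }
        }
        return result;
      }
    }
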
http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/main/resources/ganglia_properties.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/ganglia_properties.json b/ambari-server/src/main/resources/ganglia_properties.json
index 5984b73..05360da 100644
--- a/ambari-server/src/main/resources/ganglia_properties.json
+++ b/ambari-server/src/main/resources/ganglia_properties.json
@@ -112,28 +112,28 @@
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
+      "metrics/cpu/cpu_num":{
+        "metric":"cpu_num",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
+      "metrics/cpu/cpu_speed":{
+        "metric":"cpu_speed",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
+      "metrics/cpu/cpu_aidle":{
+        "metric":"cpu_aidle",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
+      "metrics/cpu/cpu_idle":{
+        "metric":"cpu_idle",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
+      "metrics/cpu/cpu_nice":{
+        "metric":"cpu_nice",
         "pointInTime":true,
         "temporal":true
       },
@@ -162,173 +162,203 @@
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
+      "metrics/load/load_fifteen":{
+        "metric":"load_fifteen",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
+      "metrics/load/load_five":{
+        "metric":"load_five",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
+      "metrics/load/load_one":{
+        "metric":"load_one",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
+      "metrics/memory/mem_buffers":{
+        "metric":"mem_buffers",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
+      "metrics/memory/mem_cached":{
+        "metric":"mem_cached",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
+      "metrics/memory/mem_free":{
+        "metric":"mem_free",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/maxMemoryM":{
-        "metric":"jvm.metrics.maxMemoryM",
+      "metrics/memory/mem_shared":{
+        "metric":"mem_shared",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
+      "metrics/memory/mem_total":{
+        "metric":"mem_total",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
+      "metrics/memory/swap_free":{
+        "metric":"swap_free",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
+      "metrics/memory/swap_total":{
+        "metric":"swap_total",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
+      "metrics/network/bytes_in":{
+        "metric":"bytes_in",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
+      "metrics/network/bytes_out":{
+        "metric":"bytes_out",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
+      "metrics/network/pkts_in":{
+        "metric":"pkts_in",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
+      "metrics/network/pkts_out":{
+        "metric":"pkts_out",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
+      "metrics/part_max_used":{
+        "metric":"part_max_used",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
+      "metrics/process/proc_run":{
+        "metric":"proc_run",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
+      "metrics/process/proc_total":{
+        "metric":"proc_total",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
+      "metrics/disk/read_count":{
+        "metric":"read_count",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/load/load_five":{
-        "metric":"load_five",
+      "metrics/disk/write_count":{
+        "metric":"write_count",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/load/load_one":{
-        "metric":"load_one",
+      "metrics/disk/read_bytes":{
+        "metric":"read_bytes",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
+      "metrics/disk/write_bytes":{
+        "metric":"write_bytes",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
+      "metrics/disk/read_time":{
+        "metric":"read_time",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
+      "metrics/disk/write_time":{
+        "metric":"write_time",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
+      "metrics/jvm/gcCount":{
+        "metric":"jvm.metrics.gcCount",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
+      "metrics/jvm/gcTimeMillis":{
+        "metric":"jvm.metrics.gcTimeMillis",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
+      "metrics/jvm/logError":{
+        "metric":"jvm.metrics.logError",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
+      "metrics/jvm/logFatal":{
+        "metric":"jvm.metrics.logFatal",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
+      "metrics/jvm/logInfo":{
+        "metric":"jvm.metrics.logInfo",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
+      "metrics/jvm/logWarn":{
+        "metric":"jvm.metrics.logWarn",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
+      "metrics/jvm/maxMemoryM":{
+        "metric":"jvm.metrics.maxMemoryM",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
+      "metrics/jvm/memHeapCommittedM":{
+        "metric":"jvm.metrics.memHeapCommittedM",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/part_max_used":{
-        "metric":"part_max_used",
+      "metrics/jvm/memHeapUsedM":{
+        "metric":"jvm.metrics.memHeapUsedM",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
+      "metrics/jvm/memNonHeapCommittedM":{
+        "metric":"jvm.metrics.memNonHeapCommittedM",
         "pointInTime":true,
         "temporal":true
       },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
+      "metrics/jvm/memNonHeapUsedM":{
+        "metric":"jvm.metrics.memNonHeapUsedM",
+        "pointInTime":true,
+        "temporal":true
+      },
+      "metrics/jvm/threadsBlocked":{
+        "metric":"jvm.metrics.threadsBlocked",
+        "pointInTime":true,
+        "temporal":true
+      },
+      "metrics/jvm/threadsNew":{
+        "metric":"jvm.metrics.threadsNew",
+        "pointInTime":true,
+        "temporal":true
+      },
+      "metrics/jvm/threadsRunnable":{
+        "metric":"jvm.metrics.threadsRunnable",
+        "pointInTime":true,
+        "temporal":true
+      },
+      "metrics/jvm/threadsTerminated":{
+        "metric":"jvm.metrics.threadsTerminated",
+        "pointInTime":true,
+        "temporal":true
+      },
+      "metrics/jvm/threadsTimedWaiting":{
+        "metric":"jvm.metrics.threadsTimedWaiting",
+        "pointInTime":true,
+        "temporal":true
+      },
+      "metrics/jvm/threadsWaiting":{
+        "metric":"jvm.metrics.threadsWaiting",
         "pointInTime":true,
         "temporal":true
       },
@@ -2426,7 +2456,7 @@
         "temporal":true
       },
       "metrics/dfs/datanode/blocks_replicated":{
-        "metric":"dfs.datanode.blocks_replicated",
+        "metric":"dfs.datanode.BlocksReplicated",
         "pointInTime":true,
         "temporal":true
       },
@@ -11830,47 +11860,56 @@
       "metrics/boottime":{
         "metric":"boottime",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/cpu/cpu_aidle":{
         "metric":"cpu_aidle",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/cpu/cpu_idle":{
         "metric":"cpu_idle",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/cpu/cpu_nice":{
         "metric":"cpu_nice",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/cpu/cpu_num":{
         "metric":"cpu_num",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/cpu/cpu_speed":{
         "metric":"cpu_speed",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/cpu/cpu_system":{
         "metric":"cpu_system",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/cpu/cpu_user":{
         "metric":"cpu_user",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/cpu/cpu_wio":{
         "metric":"cpu_wio",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/dfs/datanode/blockChecksumOp_avg_time":{
         "metric":"dfs.datanode.blockChecksumOp_avg_time",
@@ -11913,7 +11952,7 @@
         "temporal":true
       },
       "metrics/dfs/datanode/blocks_replicated":{
-        "metric":"dfs.datanode.blocks_replicated",
+        "metric":"dfs.datanode.BlocksReplicated",
         "pointInTime":true,
         "temporal":true
       },
@@ -12010,17 +12049,20 @@
       "metrics/disk/disk_free":{
         "metric":"disk_free",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/disk/disk_total":{
         "metric":"disk_total",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/disk/part_max_used":{
         "metric":"part_max_used",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/jvm/gcCount":{
         "metric":"jvm.metrics.gcCount",
@@ -12110,17 +12152,20 @@
       "metrics/load/load_fifteen":{
         "metric":"load_fifteen",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/load/load_five":{
         "metric":"load_five",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/load/load_one":{
         "metric":"load_one",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/memory/mem_buffers":{
         "metric":"mem_buffers",
@@ -12130,17 +12175,20 @@
       "metrics/memory/mem_cached":{
         "metric":"mem_cached",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/memory/mem_free":{
         "metric":"mem_free",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/memory/mem_shared":{
         "metric":"mem_shared",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/memory/mem_total":{
         "metric":"mem_total",
@@ -12150,42 +12198,50 @@
       "metrics/memory/swap_free":{
         "metric":"swap_free",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/memory/swap_total":{
         "metric":"swap_total",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/network/bytes_in":{
         "metric":"bytes_in",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/network/bytes_out":{
         "metric":"bytes_out",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/network/pkts_in":{
         "metric":"pkts_in",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/network/pkts_out":{
         "metric":"pkts_out",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/process/proc_run":{
         "metric":"proc_run",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/process/proc_total":{
         "metric":"proc_total",
         "pointInTime":true,
-        "temporal":true
+        "temporal":true,
+        "amsHostMetric":true
       },
       "metrics/rpc/NumOpenConnections":{
         "metric":"rpc.rpc.NumOpenConnections",

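Only some entries in ganglia_properties.json carry the new key, so a reader has to treat a missing "amsHostMetric" as false to preserve the old behavior of unflagged definitions. A throwaway Jackson tree-model check makes that defaulting explicit; Jackson is an assumption here for illustration, and Ambari's own loader may bind these files differently.

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class AmsHostMetricFlagDemo {
      public static void main(String[] args) throws Exception {
        String json =
            "{\"metrics/cpu/cpu_idle\": {"
          + "  \"metric\": \"cpu_idle\","
          + "  \"pointInTime\": true,"
          + "  \"temporal\": true,"
          + "  \"amsHostMetric\": true },"
          + " \"metrics/jvm/gcCount\": {"
          + "  \"metric\": \"jvm.metrics.gcCount\","
          + "  \"pointInTime\": true,"
          + "  \"temporal\": true } }";

        JsonNode root = new ObjectMapper().readTree(json);
        for (JsonNode entry : root) {
          // path() returns a "missing" node rather than null, and
          // asBoolean(false) keeps entries without the flag at the
          // pre-commit default.
          System.out.println(entry.get("metric").asText() + " -> "
              + entry.path("amsHostMetric").asBoolean(false));
        }
      }
    }
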
http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProviderTest.java
index b00ca4b..8e5d017 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProviderTest.java
@@ -42,6 +42,7 @@ import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.state.stack.Metric;
 import org.apache.ambari.server.state.stack.MetricDefinition;
 import org.apache.ambari.server.state.stack.WidgetLayout;
 import org.junit.After;
@@ -119,7 +120,12 @@ public class StackArtifactResourceProviderTest {
     Assert.assertEquals(1, ((ArrayList) descriptor.get("Component")).size());
     MetricDefinition md = (MetricDefinition) ((ArrayList) descriptor.get
       ("Component")).iterator().next();
+
+    Metric m1 = md.getMetrics().get("metrics/dfs/datanode/heartBeats_avg_time");
+    Metric m2 = md.getMetrics().get("metrics/rpc/closeRegion_num_ops");
     Assert.assertEquals(326, md.getMetrics().size());
+    Assert.assertTrue(m1.isAmsHostMetric());
+    Assert.assertFalse(m2.isAmsHostMetric());
 
     verify(managementController);
   }
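
The "amsHostMetric":true entries above mark host-level metrics (disk, load, memory, network, process) that AMS can also serve for any host component, and the test asserts the flag round-trips through the stack metric descriptor via Metric.isAmsHostMetric(). As a rough sketch of the shape involved (field names mirror the JSON keys above; this is an illustration, not the real org.apache.ambari.server.state.stack.Metric class):

  // Illustrative stand-in for the stack metric definition; the real class
  // carries more state. Field names follow the JSON keys shown above.
  public class MetricSketch {
    private String metric;         // source metric name, e.g. "load_one"
    private boolean pointInTime;   // point-in-time reads supported
    private boolean temporal;      // time-series reads supported
    private boolean amsHostMetric; // also exposed as an AMS host metric

    public boolean isAmsHostMetric() {
      return amsHostMetric;
    }
  }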


[4/7] ambari git commit: AMBARI-10290. Expose available host metrics across hostcomponents. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStoreTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStoreTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStoreTest.java
index e610403..8233b3f 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStoreTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStoreTest.java
@@ -17,14 +17,15 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
 import org.junit.Test;
 
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.Function.ReadFunction.AVG;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.Function.PostProcessingFunction.RATE;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function.ReadFunction.AVG;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function.PostProcessingFunction.RATE;
 import static org.assertj.core.api.Assertions.*;
 
 public class HBaseTimelineMetricStoreTest {

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java
index f9dda46..da3d703b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java
@@ -19,27 +19,35 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
 
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetricReader;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricClusterAggregator;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricClusterAggregatorHourly;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultCondition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
-
 import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertNotNull;
 import static junit.framework.Assert.fail;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.DefaultCondition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.GET_CLUSTER_AGGREGATE_SQL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.NATIVE_TIME_RANGE_DELTA;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.MetricTestHelper.prepareSingleTimelineMetric;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.MetricTestHelper.createEmptyTimelineClusterMetric;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.MetricTestHelper.prepareSingleTimelineMetric;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_APP_IDS;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.GET_CLUSTER_AGGREGATE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.NATIVE_TIME_RANGE_DELTA;
 
 public class ITClusterAggregator extends AbstractMiniHBaseClusterTest {
   private Connection conn;
@@ -126,8 +134,7 @@ public class ITClusterAggregator extends AbstractMiniHBaseClusterTest {
 
 
   @Test
-  public void testShouldAggregateClusterIgnoringInstance() throws
-    Exception {
+  public void testShouldAggregateClusterIgnoringInstance() throws Exception {
     // GIVEN
     TimelineMetricClusterAggregator agg =
       new TimelineMetricClusterAggregator(hdb, new Configuration());
@@ -370,6 +377,57 @@ public class ITClusterAggregator extends AbstractMiniHBaseClusterTest {
     assertEquals("Two hourly aggregated row expected ", 2, count);
   }
 
+  @Test
+  public void testAppLevelHostMetricAggregates() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(CLUSTER_AGGREGATOR_APP_IDS, "app1");
+    TimelineMetricClusterAggregator agg = new TimelineMetricClusterAggregator(hdb, conf);
+
+    long startTime = System.currentTimeMillis();
+    long ctime = startTime;
+    long minute = 60 * 1000;
+    hdb.insertMetricRecords(prepareSingleTimelineMetric((ctime), "local1",
+      "app1", null, "app_metric_random", 1));
+    ctime += 10;
+    hdb.insertMetricRecords(prepareSingleTimelineMetric(ctime, "local1",
+      "cpu_user", 1));
+    ctime += 10;
+    hdb.insertMetricRecords(prepareSingleTimelineMetric(ctime, "local2",
+      "cpu_user", 2));
+
+    // WHEN
+    long endTime = ctime + minute;
+    boolean success = agg.doWork(startTime, endTime);
+
+    //THEN
+    Condition condition = new DefaultCondition(
+      Collections.singletonList("cpu_user"), null, "app1", null,
+      startTime, endTime, null, null, true);
+    condition.setStatement(String.format(GET_CLUSTER_AGGREGATE_SQL,
+      PhoenixTransactSQL.getNaiveTimeRangeHint(startTime, NATIVE_TIME_RANGE_DELTA),
+      METRICS_CLUSTER_AGGREGATE_TABLE_NAME));
+
+    PreparedStatement pstmt = PhoenixTransactSQL.prepareGetMetricsSqlStmt
+      (conn, condition);
+    ResultSet rs = pstmt.executeQuery();
+
+    int recordCount = 0;
+    TimelineClusterMetric currentMetric = null;
+    MetricClusterAggregate currentHostAggregate = null;
+    while (rs.next()) {
+      currentMetric = metricReader.fromResultSet(rs);
+      currentHostAggregate = PhoenixHBaseAccessor.getMetricClusterAggregateFromResultSet(rs);
+      recordCount++;
+    }
+    Assert.assertEquals(4, recordCount);
+    assertNotNull(currentMetric);
+    assertEquals("cpu_user", currentMetric.getMetricName());
+    assertEquals("app1", currentMetric.getAppId());
+    assertNotNull(currentHostAggregate);
+    assertEquals(1, currentHostAggregate.getNumberOfHosts());
+    assertEquals(1.0d, currentHostAggregate.getSum());
+  }
+
   private ResultSet executeQuery(String query) throws SQLException {
     Connection conn = getConnection(getUrl());
     Statement stmt = conn.createStatement();

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITMetricAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITMetricAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITMetricAggregator.java
index 9f07fd7..66420d9 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITMetricAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITMetricAggregator.java
@@ -20,10 +20,15 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregator;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultCondition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
@@ -32,16 +37,13 @@ import java.util.Arrays;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
-
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertTrue;
 import static junit.framework.Assert.fail;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.DefaultCondition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.GET_METRIC_AGGREGATE_ONLY_SQL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_AGGREGATE_HOURLY_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.NATIVE_TIME_RANGE_DELTA;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.GET_METRIC_AGGREGATE_ONLY_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_HOURLY_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.NATIVE_TIME_RANGE_DELTA;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.MetricTestHelper.createEmptyTimelineMetric;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
index e84476f..11c4d0f 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
@@ -20,6 +20,17 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregator;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricClusterAggregator;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricClusterAggregatorHourly;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultCondition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -36,7 +47,7 @@ import java.util.Map;
 
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertTrue;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.MetricTestHelper.prepareSingleTimelineMetric;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.MetricTestHelper.createEmptyTimelineMetric;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.MetricTestHelper.createMetricHostAggregate;
@@ -90,7 +101,7 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
 
     // WHEN
     long endTime = ctime + minute;
-    PhoenixTransactSQL.Condition condition = new PhoenixTransactSQL.DefaultCondition(
+    Condition condition = new DefaultCondition(
         Collections.singletonList("disk_free"), "local1", null, null, startTime,
         endTime, Precision.SECONDS, null, true);
     TimelineMetrics timelineMetrics = hdb.getMetricRecords(condition,
@@ -125,7 +136,7 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
     assertTrue(success);
 
     // WHEN
-    PhoenixTransactSQL.Condition condition = new PhoenixTransactSQL.DefaultCondition(
+    Condition condition = new DefaultCondition(
         Collections.singletonList("disk_free"), "local1", null, null, startTime,
         endTime, Precision.MINUTES, null, false);
     TimelineMetrics timelineMetrics = hdb.getMetricRecords(condition,
@@ -176,7 +187,7 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
     assertTrue(success);
 
     // WHEN
-    PhoenixTransactSQL.Condition condition = new PhoenixTransactSQL.DefaultCondition(
+    Condition condition = new DefaultCondition(
         Collections.singletonList("disk_used"), "test_host", "test_app", null,
         startTime, endTime, Precision.HOURS, null, true);
     TimelineMetrics timelineMetrics = hdb.getMetricRecords(condition,
@@ -217,7 +228,7 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
     assertTrue(success);
 
     // WHEN
-    PhoenixTransactSQL.Condition condition = new PhoenixTransactSQL.DefaultCondition(
+    Condition condition = new DefaultCondition(
         Collections.singletonList("disk_free"), null, null, null,
         startTime, endTime, Precision.SECONDS, null, true);
     TimelineMetrics timelineMetrics = hdb.getAggregateMetricRecords(condition,
@@ -259,7 +270,7 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
     assertTrue(success);
 
     // WHEN
-    PhoenixTransactSQL.Condition condition = new PhoenixTransactSQL.DefaultCondition(
+    Condition condition = new DefaultCondition(
         Collections.singletonList("disk_used"), null, null, null,
         startTime, ctime + minute, Precision.HOURS, null, true);
     TimelineMetrics timelineMetrics = hdb.getAggregateMetricRecords(condition,

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
index 54b511e..bdcd6df 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
 
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 
 import java.util.Arrays;
 import java.util.HashMap;
@@ -53,7 +55,20 @@ public class MetricTestHelper {
                                                         double val) {
     TimelineMetrics m = new TimelineMetrics();
     m.setMetrics(Arrays.asList(
-        createTimelineMetric(startTime, metricName, host, instanceId, val)));
+        createTimelineMetric(startTime, metricName, host, null, instanceId, val)));
+
+    return m;
+  }
+
+  public static TimelineMetrics prepareSingleTimelineMetric(long startTime,
+                                                            String host,
+                                                            String appId,
+                                                            String instanceId,
+                                                            String metricName,
+                                                            double val) {
+    TimelineMetrics m = new TimelineMetrics();
+    m.setMetrics(Arrays.asList(
+      createTimelineMetric(startTime, metricName, host, appId, instanceId, val)));
 
     return m;
   }
@@ -62,11 +77,12 @@ public class MetricTestHelper {
   public static TimelineMetric createTimelineMetric(long startTime,
                                                 String metricName,
                                                 String host,
+                                                String appId,
                                                 String instanceId,
                                                 double val) {
     TimelineMetric m = new TimelineMetric();
-    m.setAppId("host");
     m.setHostName(host);
+    m.setAppId(appId != null ? appId : "host");
     m.setInstanceId(instanceId);
     m.setMetricName(metricName);
     m.setStartTime(startTime);

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
index 5d8ba96..d6c1814 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
   .timeline;
 
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.junit.Test;
 
 import static org.assertj.core.api.Assertions.assertThat;

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java
index 038f3de..2aefce3 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java
@@ -17,25 +17,22 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultCondition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.SplitByMetricNamesCondition;
 import org.easymock.Capture;
 import org.junit.Assert;
 import org.junit.Test;
-
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.Collections;
-
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.DefaultCondition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.SplitByMetricNamesCondition;
-
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
-
 import org.easymock.EasyMock;
 
 public class TestPhoenixTransactSQL {

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
index fa85556..99760eb 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
@@ -248,4 +248,12 @@
       different mount point from the one for hbase.rootdir in embedded mode.
     </description>
   </property>
+  <property>
+    <name>timeline.metrics.service.cluster.aggregator.appIds</name>
+    <value>datanode,nodemanager,hbase,nimbus</value>
+    <description>
+      Comma-separated list of application ids for which host-level metrics
+      are aggregated per application. Example: bytes_read across YARN NodeManagers.
+    </description>
+  </property>
 </configuration>
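
The new property drives the app-level rollup of host metrics exercised by testAppLevelHostMetricAggregates above. A minimal sketch of parsing the comma-separated value into a filter (the class and method names here are illustrative, not the actual TimelineMetricAppAggregator API):

  import java.util.HashSet;
  import java.util.Set;

  // Hypothetical helper: turns the configured appIds list into a membership
  // test. The real aggregator reads the value via Hadoop Configuration.
  public final class AppIdFilter {
    private final Set<String> appIds = new HashSet<String>();

    public AppIdFilter(String configuredValue) { // e.g. "datanode,nimbus"
      if (configuredValue != null) {
        for (String id : configuredValue.split(",")) {
          if (!id.trim().isEmpty()) {
            appIds.add(id.trim());
          }
        }
      }
    }

    // Host metrics are rolled up for an application only when it is listed.
    public boolean shouldAggregate(String appId) {
      return appId != null && appIds.contains(appId);
    }
  }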


[7/7] ambari git commit: AMBARI-10290. Expose available host metrics across hostcomponents. (swagle)

Posted by sw...@apache.org.
AMBARI-10290. Expose available host metrics across hostcomponents. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b93452ed
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b93452ed
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b93452ed

Branch: refs/heads/trunk
Commit: b93452edab3d93a7217751192145eab3944876c1
Parents: 81f311b
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Tue Mar 31 16:07:12 2015 -0700
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Thu Apr 2 14:55:29 2015 -0700

----------------------------------------------------------------------
 .../timeline/AbstractTimelineAggregator.java    | 271 ------
 .../metrics/timeline/AggregatorUtils.java       |  59 --
 .../metrics/timeline/ConnectionProvider.java    |  29 -
 .../timeline/DefaultPhoenixDataSource.java      |  77 --
 .../metrics/timeline/Function.java              | 169 ----
 .../timeline/HBaseTimelineMetricStore.java      |  10 +-
 .../metrics/timeline/MetricAggregate.java       | 110 ---
 .../timeline/MetricClusterAggregate.java        |  74 --
 .../metrics/timeline/MetricHostAggregate.java   |  81 --
 .../metrics/timeline/PhoenixHBaseAccessor.java  |  61 +-
 .../metrics/timeline/PhoenixTransactSQL.java    | 970 -------------------
 .../metrics/timeline/TimelineClusterMetric.java |  97 --
 .../timeline/TimelineClusterMetricReader.java   |  42 -
 .../timeline/TimelineMetricAggregator.java      | 145 ---
 .../TimelineMetricAggregatorFactory.java        |  99 --
 .../TimelineMetricClusterAggregator.java        | 223 -----
 .../TimelineMetricClusterAggregatorHourly.java  | 177 ----
 .../timeline/TimelineMetricConfiguration.java   |   6 +
 .../metrics/timeline/TimelineMetricReader.java  |  65 --
 .../aggregators/AbstractTimelineAggregator.java | 270 ++++++
 .../timeline/aggregators/AggregatorUtils.java   |  59 ++
 .../metrics/timeline/aggregators/Function.java  | 169 ++++
 .../timeline/aggregators/MetricAggregate.java   | 110 +++
 .../aggregators/MetricClusterAggregate.java     |  73 ++
 .../aggregators/MetricHostAggregate.java        |  81 ++
 .../aggregators/TimelineClusterMetric.java      |  97 ++
 .../TimelineClusterMetricReader.java            |  42 +
 .../aggregators/TimelineMetricAggregator.java   | 147 +++
 .../TimelineMetricAggregatorFactory.java        |  98 ++
 .../TimelineMetricAppAggregator.java            | 169 ++++
 .../TimelineMetricClusterAggregator.java        | 235 +++++
 .../TimelineMetricClusterAggregatorHourly.java  | 175 ++++
 .../aggregators/TimelineMetricReadHelper.java   |  66 ++
 .../metrics/timeline/query/Condition.java       |  46 +
 .../timeline/query/ConnectionProvider.java      |  29 +
 .../timeline/query/DefaultCondition.java        | 258 +++++
 .../query/DefaultPhoenixDataSource.java         |  77 ++
 .../timeline/query/PhoenixTransactSQL.java      | 573 +++++++++++
 .../query/SplitByMetricNamesCondition.java      | 165 ++++
 .../TestApplicationHistoryServer.java           |   3 +-
 .../timeline/AbstractMiniHBaseClusterTest.java  |   3 +-
 .../AbstractTimelineAggregatorTest.java         |   9 +-
 .../metrics/timeline/FunctionTest.java          |   7 +-
 .../timeline/HBaseTimelineMetricStoreTest.java  |   5 +-
 .../metrics/timeline/ITClusterAggregator.java   |  78 +-
 .../metrics/timeline/ITMetricAggregator.java    |  18 +-
 .../timeline/ITPhoenixHBaseAccessor.java        |  23 +-
 .../metrics/timeline/MetricTestHelper.java      |  20 +-
 .../timeline/TestMetricHostAggregate.java       |   1 +
 .../timeline/TestPhoenixTransactSQL.java        |  11 +-
 .../0.1.0/configuration/ams-site.xml            |   8 +
 51 files changed, 3130 insertions(+), 2760 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractTimelineAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractTimelineAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractTimelineAggregator.java
deleted file mode 100644
index 4af3db7..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractTimelineAggregator.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.util.Clock;
-import org.apache.hadoop.yarn.util.SystemClock;
-
-import java.io.File;
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.Date;
-
-import static java.util.concurrent.TimeUnit.SECONDS;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.AGGREGATOR_CHECKPOINT_DELAY;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.RESULTSET_FETCH_SIZE;
-
-public abstract class AbstractTimelineAggregator implements Runnable {
-  protected final PhoenixHBaseAccessor hBaseAccessor;
-  private final Log LOG;
-
-  private Clock clock;
-  protected final long checkpointDelayMillis;
-  protected final Integer resultsetFetchSize;
-  protected Configuration metricsConf;
-
-  public AbstractTimelineAggregator(PhoenixHBaseAccessor hBaseAccessor,
-                                    Configuration metricsConf) {
-    this(hBaseAccessor, metricsConf, new SystemClock());
-  }
-
-  public AbstractTimelineAggregator(PhoenixHBaseAccessor hBaseAccessor,
-                                    Configuration metricsConf, Clock clk) {
-    this.hBaseAccessor = hBaseAccessor;
-    this.metricsConf = metricsConf;
-    this.checkpointDelayMillis = SECONDS.toMillis(
-      metricsConf.getInt(AGGREGATOR_CHECKPOINT_DELAY, 120));
-    this.resultsetFetchSize = metricsConf.getInt(RESULTSET_FETCH_SIZE, 2000);
-    this.LOG = LogFactory.getLog(this.getClass());
-    this.clock = clk;
-  }
-
-  @Override
-  public void run() {
-    LOG.info("Started Timeline aggregator thread @ " + new Date());
-    Long SLEEP_INTERVAL = getSleepIntervalMillis();
-
-    while (true) {
-      long sleepTime = runOnce(SLEEP_INTERVAL);
-
-      try {
-        Thread.sleep(sleepTime);
-      } catch (InterruptedException e) {
-        LOG.info("Sleep interrupted, continuing with aggregation.");
-      }
-    }
-  }
-
-  /**
-   * Access relaxed for tests
-   */
-  protected long runOnce(Long SLEEP_INTERVAL) {
-    long currentTime = clock.getTime();
-    long lastCheckPointTime = readLastCheckpointSavingOnFirstRun(currentTime);
-    long sleepTime = SLEEP_INTERVAL;
-
-    if (lastCheckPointTime != -1) {
-      LOG.info("Last check point time: " + lastCheckPointTime + ", lagBy: "
-        + ((clock.getTime() - lastCheckPointTime) / 1000)
-        + " seconds.");
-
-      long startTime = clock.getTime();
-      boolean success = doWork(lastCheckPointTime,
-        lastCheckPointTime + SLEEP_INTERVAL);
-      long executionTime = clock.getTime() - startTime;
-      long delta = SLEEP_INTERVAL - executionTime;
-
-      if (delta > 0) {
-        // Sleep for (configured sleep - time to execute task)
-        sleepTime = delta;
-      } else {
-        // No sleep because last run took too long to execute
-        LOG.info("Aggregator execution took too long, " +
-          "cancelling sleep. executionTime = " + executionTime);
-        sleepTime = 1;
-      }
-
-      LOG.debug("Aggregator sleep interval = " + sleepTime);
-
-      if (success) {
-        try {
-          // Comment to bug fix:
-          // cannot just save lastCheckPointTime + SLEEP_INTERVAL,
-          // it has to be verified so it is not a time in the future
-          // checkpoint says what was aggregated, and there is no way
-          // the future metrics were aggregated!
-          saveCheckPoint(Math.min(currentTime, lastCheckPointTime +
-            SLEEP_INTERVAL));
-        } catch (IOException io) {
-          LOG.warn("Error saving checkpoint, restarting aggregation at " +
-            "previous checkpoint.");
-        }
-      }
-    }
-
-    return sleepTime;
-  }
-
-  private long readLastCheckpointSavingOnFirstRun(long currentTime) {
-    long lastCheckPointTime = -1;
-
-    try {
-      lastCheckPointTime = readCheckPoint();
-      if (isLastCheckPointTooOld(lastCheckPointTime)) {
-        LOG.warn("Last Checkpoint is too old, discarding last checkpoint. " +
-          "lastCheckPointTime = " + lastCheckPointTime);
-        lastCheckPointTime = -1;
-      }
-      if (lastCheckPointTime == -1) {
-        // Assuming first run, save checkpoint and sleep.
-        // Set checkpoint to 2 minutes in the past to allow the
-        // agents/collectors to catch up
-        LOG.info("Saving checkpoint time on first run." +
-          (currentTime - checkpointDelayMillis));
-        saveCheckPoint(currentTime - checkpointDelayMillis);
-      }
-    } catch (IOException io) {
-      LOG.warn("Unable to write last checkpoint time. Resuming sleep.", io);
-    }
-    return lastCheckPointTime;
-  }
-
-  private boolean isLastCheckPointTooOld(long checkpoint) {
-    // first checkpoint is saved checkpointDelayMillis in the past,
-    // so here we also need to take it into account
-    return checkpoint != -1 &&
-      ((clock.getTime() - checkpoint - checkpointDelayMillis) >
-        getCheckpointCutOffIntervalMillis());
-  }
-
-  protected long readCheckPoint() {
-    try {
-      File checkpoint = new File(getCheckpointLocation());
-      if (checkpoint.exists()) {
-        String contents = FileUtils.readFileToString(checkpoint);
-        if (contents != null && !contents.isEmpty()) {
-          return Long.parseLong(contents);
-        }
-      }
-    } catch (IOException io) {
-      LOG.debug(io);
-    }
-    return -1;
-  }
-
-  protected void saveCheckPoint(long checkpointTime) throws IOException {
-    File checkpoint = new File(getCheckpointLocation());
-    if (!checkpoint.exists()) {
-      boolean done = checkpoint.createNewFile();
-      if (!done) {
-        throw new IOException("Could not create checkpoint at location, " +
-          getCheckpointLocation());
-      }
-    }
-    FileUtils.writeStringToFile(checkpoint, String.valueOf(checkpointTime));
-  }
-
-  /**
-   * Read metrics written during the time interval and save the sum and total
-   * in the aggregate table.
-   *
-   * @param startTime Sample start time
-   * @param endTime Sample end time
-   */
-  protected boolean doWork(long startTime, long endTime) {
-    LOG.info("Start aggregation cycle @ " + new Date() + ", " +
-      "startTime = " + new Date(startTime) + ", endTime = " + new Date(endTime));
-
-    boolean success = true;
-    PhoenixTransactSQL.Condition condition =
-      prepareMetricQueryCondition(startTime, endTime);
-
-    Connection conn = null;
-    PreparedStatement stmt = null;
-    ResultSet rs = null;
-
-    try {
-      conn = hBaseAccessor.getConnection();
-      // FLUME 2. aggregate and ignore the instance
-      stmt = PhoenixTransactSQL.prepareGetMetricsSqlStmt(conn, condition);
-
-      LOG.debug("Query issued @: " + new Date());
-      rs = stmt.executeQuery();
-      LOG.debug("Query returned @: " + new Date());
-
-      aggregate(rs, startTime, endTime);
-      LOG.info("End aggregation cycle @ " + new Date());
-
-    } catch (SQLException e) {
-      LOG.error("Exception during aggregating metrics.", e);
-      success = false;
-    } catch (IOException e) {
-      LOG.error("Exception during aggregating metrics.", e);
-      success = false;
-    } finally {
-      if (rs != null) {
-        try {
-          rs.close();
-        } catch (SQLException e) {
-          // Ignore
-        }
-      }
-      if (stmt != null) {
-        try {
-          stmt.close();
-        } catch (SQLException e) {
-          // Ignore
-        }
-      }
-      if (conn != null) {
-        try {
-          conn.close();
-        } catch (SQLException sql) {
-          // Ignore
-        }
-      }
-    }
-
-    LOG.info("End aggregation cycle @ " + new Date());
-    return success;
-  }
-
-  protected abstract PhoenixTransactSQL.Condition
-  prepareMetricQueryCondition(long startTime, long endTime);
-
-  protected abstract void aggregate(ResultSet rs, long startTime, long endTime)
-    throws IOException, SQLException;
-
-  protected abstract Long getSleepIntervalMillis();
-
-  protected abstract Integer getCheckpointCutOffMultiplier();
-
-  protected Long getCheckpointCutOffIntervalMillis() {
-    return getCheckpointCutOffMultiplier() * getSleepIntervalMillis();
-  }
-
-  protected abstract boolean isDisabled();
-
-  protected abstract String getCheckpointLocation();
-}
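
Two small invariants in the loop above (the class is recreated under the aggregators package by this commit) are worth calling out: the next sleep shrinks by however long the aggregation ran, and the saved checkpoint is clamped so it can never point into the future. A toy restatement, not Ambari code:

  // Toy restatement of the sleep/checkpoint arithmetic in runOnce() above.
  public final class CheckpointMath {
    static long nextSleepMillis(long sleepInterval, long executionTime) {
      long delta = sleepInterval - executionTime;
      return delta > 0 ? delta : 1; // overran the interval: barely sleep
    }

    static long checkpointToSave(long currentTime, long lastCheckPoint,
                                 long sleepInterval) {
      // The checkpoint records what was aggregated; future metrics cannot
      // have been, so clamp to the current time.
      return Math.min(currentTime, lastCheckPoint + sleepInterval);
    }

    public static void main(String[] args) {
      System.out.println(nextSleepMillis(60000L, 12000L));           // 48000
      System.out.println(checkpointToSave(100000L, 95000L, 60000L)); // 100000
    }
  }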

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AggregatorUtils.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AggregatorUtils.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AggregatorUtils.java
deleted file mode 100644
index fbea248..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AggregatorUtils.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-
-import java.util.Map;
-
-/**
- *
- */
-public class AggregatorUtils {
-
-  public static double[] calculateAggregates(Map<Long, Double> metricValues) {
-    double[] values = new double[4];
-    double max = Double.MIN_VALUE;
-    double min = Double.MAX_VALUE;
-    double sum = 0.0;
-    int metricCount = 0;
-
-    if (metricValues != null && !metricValues.isEmpty()) {
-      for (Double value : metricValues.values()) {
-        // TODO: Some nulls in data - need to investigate null values from host
-        if (value != null) {
-          if (value > max) {
-            max = value;
-          }
-          if (value < min) {
-            min = value;
-          }
-          sum += value;
-        }
-      }
-      metricCount = metricValues.values().size();
-    }
-    // BR: WHY ZERO is a good idea?
-    values[0] = sum;
-    values[1] = max != Double.MIN_VALUE ? max : 0.0;
-    values[2] = min != Double.MAX_VALUE ? min : 0.0;
-    values[3] = metricCount;
-
-    return values;
-  }
-
-}
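
For reference, a usage sketch for calculateAggregates (moved unchanged to the aggregators package by this commit): the returned array is [sum, max, min, count], max and min fall back to 0.0 when no usable values exist, and the count comes from values().size(), so null entries are counted even though they are skipped in the sums.

  import java.util.HashMap;
  import java.util.Map;
  import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.AggregatorUtils;

  public class AggregatesDemo {
    public static void main(String[] args) {
      Map<Long, Double> metricValues = new HashMap<Long, Double>();
      metricValues.put(1000L, 2.0);
      metricValues.put(2000L, 4.0);
      metricValues.put(3000L, null); // skipped for sum/max/min, still counted

      double[] agg = AggregatorUtils.calculateAggregates(metricValues);
      // agg[0] = 6.0 (sum), agg[1] = 4.0 (max),
      // agg[2] = 2.0 (min), agg[3] = 3.0 (count)
      System.out.println(agg[0] + " " + agg[1] + " " + agg[2] + " " + agg[3]);
    }
  }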

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ConnectionProvider.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ConnectionProvider.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ConnectionProvider.java
deleted file mode 100644
index 34da78b..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ConnectionProvider.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-
-import java.sql.Connection;
-import java.sql.SQLException;
-
-/**
- *
- */
-public interface ConnectionProvider {
-  public Connection getConnection() throws SQLException;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/DefaultPhoenixDataSource.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/DefaultPhoenixDataSource.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/DefaultPhoenixDataSource.java
deleted file mode 100644
index 47db730..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/DefaultPhoenixDataSource.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-
-public class DefaultPhoenixDataSource implements ConnectionProvider {
-
-  static final Log LOG = LogFactory.getLog(DefaultPhoenixDataSource.class);
-  private static final String ZOOKEEPER_CLIENT_PORT =
-    "hbase.zookeeper.property.clientPort";
-  private static final String ZOOKEEPER_QUORUM = "hbase.zookeeper.quorum";
-  private static final String ZNODE_PARENT = "zookeeper.znode.parent";
-
-  private static final String connectionUrl = "jdbc:phoenix:%s:%s:%s";
-  private final String url;
-
-  public DefaultPhoenixDataSource(Configuration hbaseConf) {
-    String zookeeperClientPort = hbaseConf.getTrimmed(ZOOKEEPER_CLIENT_PORT,
-      "2181");
-    String zookeeperQuorum = hbaseConf.getTrimmed(ZOOKEEPER_QUORUM);
-    String znodeParent = hbaseConf.getTrimmed(ZNODE_PARENT, "/hbase");
-    if (zookeeperQuorum == null || zookeeperQuorum.isEmpty()) {
-      throw new IllegalStateException("Unable to find Zookeeper quorum to " +
-        "access HBase store using Phoenix.");
-    }
-
-    url = String.format(connectionUrl,
-      zookeeperQuorum,
-      zookeeperClientPort,
-      znodeParent);
-  }
-
-  /**
-   * Get JDBC connection to HBase store. Assumption is that the hbase
-   * configuration is present on the classpath and loaded by the caller into
-   * the Configuration object.
-   * Phoenix already caches the HConnection between the client and HBase
-   * cluster.
-   *
-   * @return @java.sql.Connection
-   */
-  public Connection getConnection() throws SQLException {
-
-    LOG.debug("Metric store connection url: " + url);
-    try {
-      return DriverManager.getConnection(url);
-    } catch (SQLException e) {
-      LOG.warn("Unable to connect to HBase store using Phoenix.", e);
-
-      throw e;
-    }
-  }
-
-}
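
The provider above (recreated under the query package) assembles the Phoenix JDBC URL from three HBase client settings. A toy rendering of the same format string, with example host names:

  public class PhoenixUrlDemo {
    public static void main(String[] args) {
      // Same format as connectionUrl above; the values are examples only.
      String url = String.format("jdbc:phoenix:%s:%s:%s",
          "zk1.example.com,zk2.example.com", // hbase.zookeeper.quorum
          "2181",                            // hbase.zookeeper.property.clientPort
          "/hbase");                         // zookeeper.znode.parent
      System.out.println(url);
      // -> jdbc:phoenix:zk1.example.com,zk2.example.com:2181:/hbase
    }
  }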

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/Function.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/Function.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/Function.java
deleted file mode 100644
index 11245d8..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/Function.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-/**
- * Is used to determine metrics aggregate table.
- *
- * @see org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.TimelineWebServices#getTimelineMetric
- * @see org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.TimelineWebServices#getTimelineMetrics
- */
-public class Function {
-  public static Function DEFAULT_VALUE_FUNCTION =
-    new Function(ReadFunction.VALUE, null);
-  private static final String SUFFIX_SEPARATOR = "\\._";
-
-  private ReadFunction readFunction = ReadFunction.VALUE;
-  private PostProcessingFunction postProcessingFunction = null;
-
-  public Function(){
-
-  }
-
-  public Function(ReadFunction readFunction,
-                  PostProcessingFunction ppFunction){
-    if (readFunction!=null){
-      this.readFunction = readFunction ;
-    }
-    this.postProcessingFunction = ppFunction;
-  }
-
-  public static Function fromMetricName(String metricName){
-    // gets postprocessing, and aggregation function
-    // ex. Metric._rate._avg
-    String[] parts = metricName.split(SUFFIX_SEPARATOR);
-
-    ReadFunction readFunction = ReadFunction.VALUE;
-    PostProcessingFunction ppFunction = null;
-
-      if (parts.length == 3) {
-        ppFunction = PostProcessingFunction.getFunction(parts[1]);
-        readFunction = ReadFunction.getFunction(parts[2]);
-      } else if (parts.length == 2) {
-        ppFunction = null;
-        readFunction = ReadFunction.getFunction(parts[1]);
-      }
-
-
-    return new Function(readFunction, ppFunction);
-  }
-
-  public String getSuffix(){
-    return (postProcessingFunction == null)? readFunction.getSuffix() :
-      postProcessingFunction.getSuffix() + readFunction.getSuffix();
-  }
-
-  public ReadFunction getReadFunction() {
-    return readFunction;
-  }
-
-  @Override
-  public String toString() {
-    return "Function{" +
-      "readFunction=" + readFunction +
-      ", postProcessingFunction=" + postProcessingFunction +
-      '}';
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (!(o instanceof Function)) return false;
-
-    Function function = (Function) o;
-
-    return postProcessingFunction == function.postProcessingFunction
-      && readFunction == function.readFunction;
-
-  }
-
-  @Override
-  public int hashCode() {
-    int result = readFunction.hashCode();
-    result = 31 * result + (postProcessingFunction != null ?
-      postProcessingFunction.hashCode() : 0);
-    return result;
-  }
-
-  public enum PostProcessingFunction {
-    NONE(""),
-    RATE("._rate");
-
-    PostProcessingFunction(String suffix){
-      this.suffix = suffix;
-    }
-
-    private String suffix = "";
-
-    public String getSuffix(){
-      return suffix;
-    }
-
-    public static PostProcessingFunction getFunction(String functionName) throws
-      FunctionFormatException {
-      if (functionName == null) {
-        return NONE;
-      }
-
-      try {
-        return PostProcessingFunction.valueOf(functionName.toUpperCase());
-      } catch (IllegalArgumentException e) {
-        throw new FunctionFormatException("Function should be value, avg, min, " +
-          "max", e);
-      }
-    }
-  }
-
-  public enum ReadFunction {
-    VALUE(""),
-    AVG("._avg"),
-    MIN("._min"),
-    MAX("._max"),
-    SUM("._sum");
-
-    private final String suffix;
-
-    ReadFunction(String suffix){
-      this.suffix = suffix;
-    }
-
-    public String getSuffix() {
-      return suffix;
-    }
-
-    public static ReadFunction getFunction(String functionName) throws
-      FunctionFormatException {
-      if (functionName == null) {
-        return VALUE;
-      }
-      try {
-        return ReadFunction.valueOf(functionName.toUpperCase());
-      } catch (IllegalArgumentException e) {
-        throw new FunctionFormatException(
-          "Function should be value, avg, min, max or sum. Got " + functionName, e);
-      }
-    }
-  }
-
-  public static class FunctionFormatException extends IllegalArgumentException {
-    public FunctionFormatException(String message, Throwable cause) {
-      super(message, cause);
-    }
-  }
-
-}

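For context on the class above, a minimal same-package sketch of how
Function.fromMetricName() behaves (the metric name is invented for
illustration):

    // "<name>._rate._avg" -> post-processing RATE, read function AVG
    Function f = Function.fromMetricName("regionserver.readRequestCount._rate._avg");
    System.out.println(f.getSuffix()); // prints "._rate._avg"
    System.out.println(f);             // Function{readFunction=AVG, postProcessingFunction=RATE}

    // no suffix -> defaults: ReadFunction.VALUE, no post-processing
    Function plain = Function.fromMetricName("regionserver.readRequestCount");
    System.out.println(plain.getSuffix()); // prints ""
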
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
index a4980b4..1fac404 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -25,6 +25,14 @@ import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregator;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricClusterAggregator;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricClusterAggregatorHourly;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultCondition;
+
 import java.io.IOException;
 import java.sql.SQLException;
 import java.util.ArrayList;
@@ -33,8 +41,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.DefaultCondition;
 
 public class HBaseTimelineMetricStore extends AbstractService
     implements TimelineMetricStore {

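The hunk above is a pure package move: Condition and DefaultCondition are
now top-level classes under the new query package instead of nested types
in PhoenixTransactSQL, so call sites swap static nested-type imports for
ordinary ones:

    // before (nested in PhoenixTransactSQL):
    import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
    // after (top-level class in the query package):
    import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
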
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricAggregate.java
deleted file mode 100644
index 61e15d7..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricAggregate.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.codehaus.jackson.annotate.JsonProperty;
-import org.codehaus.jackson.annotate.JsonSubTypes;
-import org.codehaus.jackson.map.ObjectMapper;
-
-import java.io.IOException;
-
-/**
- * Base class for metric aggregates; tracks the running sum, deviation,
- * max and min of a metric.
- */
-@JsonSubTypes({@JsonSubTypes.Type(value = MetricClusterAggregate.class),
-  @JsonSubTypes.Type(value = MetricHostAggregate.class)})
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public class MetricAggregate {
-  private static final ObjectMapper mapper = new ObjectMapper();
-
-  protected Double sum = 0.0;
-  protected Double deviation;
-  protected Double max = Double.MIN_VALUE;
-  protected Double min = Double.MAX_VALUE;
-
-  public MetricAggregate() {
-  }
-
-  MetricAggregate(Double sum, Double deviation, Double max,
-                  Double min) {
-    this.sum = sum;
-    this.deviation = deviation;
-    this.max = max;
-    this.min = min;
-  }
-
-  void updateSum(Double sum) {
-    this.sum += sum;
-  }
-
-  void updateMax(Double max) {
-    if (max > this.max) {
-      this.max = max;
-    }
-  }
-
-  void updateMin(Double min) {
-    if (min < this.min) {
-      this.min = min;
-    }
-  }
-
-  @JsonProperty("sum")
-  Double getSum() {
-    return sum;
-  }
-
-  @JsonProperty("deviation")
-  Double getDeviation() {
-    return deviation;
-  }
-
-  @JsonProperty("max")
-  Double getMax() {
-    return max;
-  }
-
-  @JsonProperty("min")
-  Double getMin() {
-    return min;
-  }
-
-  public void setSum(Double sum) {
-    this.sum = sum;
-  }
-
-  public void setDeviation(Double deviation) {
-    this.deviation = deviation;
-  }
-
-  public void setMax(Double max) {
-    this.max = max;
-  }
-
-  public void setMin(Double min) {
-    this.min = min;
-  }
-
-  public String toJSON() throws IOException {
-    return mapper.writeValueAsString(this);
-  }
-}

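A minimal sketch of the Jackson mapping toJSON() produces for the class
above (values invented; field order may differ):

    MetricAggregate agg = new MetricAggregate();
    agg.setSum(42.0);
    agg.setDeviation(0.0);
    agg.setMax(10.0);
    agg.setMin(1.0);
    // prints something like {"sum":42.0,"deviation":0.0,"max":10.0,"min":1.0}
    System.out.println(agg.toJSON());
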
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricClusterAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricClusterAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricClusterAggregate.java
deleted file mode 100644
index c13c85f..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricClusterAggregate.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-
-import org.codehaus.jackson.annotate.JsonCreator;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- * Cluster-level aggregate of a metric across the hosts reporting it.
- */
-public class MetricClusterAggregate extends MetricAggregate {
-  private int numberOfHosts;
-
-  @JsonCreator
-  public MetricClusterAggregate() {
-  }
-
-  MetricClusterAggregate(Double sum, int numberOfHosts, Double deviation,
-                         Double max, Double min) {
-    super(sum, deviation, max, min);
-    this.numberOfHosts = numberOfHosts;
-  }
-
-  @JsonProperty("numberOfHosts")
-  int getNumberOfHosts() {
-    return numberOfHosts;
-  }
-
-  void updateNumberOfHosts(int count) {
-    this.numberOfHosts += count;
-  }
-
-  public void setNumberOfHosts(int numberOfHosts) {
-    this.numberOfHosts = numberOfHosts;
-  }
-
-  /**
-   * Merge another aggregate into this one: update min, max, sum and the
-   * host count.
-   */
-  void updateAggregates(MetricClusterAggregate hostAggregate) {
-    updateMax(hostAggregate.getMax());
-    updateMin(hostAggregate.getMin());
-    updateSum(hostAggregate.getSum());
-    updateNumberOfHosts(hostAggregate.getNumberOfHosts());
-  }
-
-  @Override
-  public String toString() {
-    return "MetricClusterAggregate{" +
-      "sum=" + sum +
-      ", numberOfHosts=" + numberOfHosts +
-      ", deviation=" + deviation +
-      ", max=" + max +
-      ", min=" + min +
-      '}';
-  }
-}

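updateAggregates() above folds one cluster aggregate into another; a
same-package sketch with invented numbers (the constructor is
package-private):

    MetricClusterAggregate total = new MetricClusterAggregate(10.0, 2, null, 6.0, 1.0);
    MetricClusterAggregate more = new MetricClusterAggregate(5.0, 1, null, 8.0, 0.5);
    total.updateAggregates(more);
    // sum = 15.0, numberOfHosts = 3, max = 8.0, min = 0.5
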
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricHostAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricHostAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricHostAggregate.java
deleted file mode 100644
index 02cc207..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricHostAggregate.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-
-import org.codehaus.jackson.annotate.JsonCreator;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- * Represents a minute-based aggregation of metric values, used to serve
- * resolutions greater than a minute.
- */
-public class MetricHostAggregate extends MetricAggregate {
-
-  private long numberOfSamples = 0;
-
-  @JsonCreator
-  public MetricHostAggregate() {
-    super(0.0, 0.0, Double.MIN_VALUE, Double.MAX_VALUE);
-  }
-
-  public MetricHostAggregate(Double sum, int numberOfSamples,
-                             Double deviation,
-                             Double max, Double min) {
-    super(sum, deviation, max, min);
-    this.numberOfSamples = numberOfSamples;
-  }
-
-  @JsonProperty("numberOfSamples")
-  long getNumberOfSamples() {
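-    // return 1 when nothing was recorded so getAvg() never divides by zero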
-    return numberOfSamples == 0 ? 1 : numberOfSamples;
-  }
-
-  void updateNumberOfSamples(long count) {
-    this.numberOfSamples += count;
-  }
-
-  public void setNumberOfSamples(long numberOfSamples) {
-    this.numberOfSamples = numberOfSamples;
-  }
-
-  public double getAvg() {
-    return sum / numberOfSamples;
-  }
-
-  /**
-   * Merge another aggregate into this one: update min, max, sum and the
-   * sample count.
-   */
-  void updateAggregates(MetricHostAggregate hostAggregate) {
-    updateMax(hostAggregate.getMax());
-    updateMin(hostAggregate.getMin());
-    updateSum(hostAggregate.getSum());
-    updateNumberOfSamples(hostAggregate.getNumberOfSamples());
-  }
-
-  @Override
-  public String toString() {
-    return "MetricHostAggregate{" +
-      "sum=" + sum +
-      ", numberOfSamples=" + numberOfSamples +
-      ", deviation=" + deviation +
-      ", max=" + max +
-      ", min=" + min +
-      '}';
-  }
-}

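The host-level counterpart keys its average off the sample count; a
same-package sketch with invented numbers:

    MetricHostAggregate minute = new MetricHostAggregate(60.0, 6, 0.0, 15.0, 5.0);
    MetricHostAggregate next = new MetricHostAggregate(30.0, 4, 0.0, 20.0, 2.0);
    minute.updateAggregates(next);
    // sum = 90.0 over 10 samples, so getAvg() = 9.0; max = 20.0, min = 2.0
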
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
index 6a38517..2e78912 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
@@ -24,6 +24,17 @@ import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.hbase.util.RetryCounterFactory;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.AggregatorUtils;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricReadHelper;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.ConnectionProvider;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultPhoenixDataSource;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.SplitByMetricNamesCondition;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.codehaus.jackson.map.ObjectMapper;
@@ -41,25 +52,6 @@ import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 import static java.util.concurrent.TimeUnit.SECONDS;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.ALTER_SQL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.CREATE_METRICS_AGGREGATE_HOURLY_TABLE_SQL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.CREATE_METRICS_AGGREGATE_MINUTE_TABLE_SQL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_SQL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_TABLE_SQL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.CREATE_METRICS_TABLE_SQL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.SplitByMetricNamesCondition;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.DEFAULT_ENCODING;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.DEFAULT_TABLE_COMPRESSION;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_AGGREGATE_HOURLY_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.UPSERT_AGGREGATE_RECORD_SQL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.UPSERT_CLUSTER_AGGREGATE_SQL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.UPSERT_CLUSTER_AGGREGATE_TIME_SQL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.UPSERT_METRICS_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_HOUR_TABLE_TTL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_MINUTE_TABLE_TTL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.GLOBAL_MAX_RETRIES;
@@ -70,6 +62,23 @@ import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.ti
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_HOUR_TABLE_TTL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.HOST_MINUTE_TABLE_TTL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.PRECISION_TABLE_TTL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.ALTER_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_METRICS_AGGREGATE_HOURLY_TABLE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_METRICS_AGGREGATE_MINUTE_TABLE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_TABLE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_METRICS_TABLE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.DEFAULT_ENCODING;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.DEFAULT_TABLE_COMPRESSION;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_HOURLY_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_AGGREGATE_RECORD_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_CLUSTER_AGGREGATE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_CLUSTER_AGGREGATE_TIME_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_METRICS_SQL;
 
 /**
  * Provides a facade over the Phoenix API to access HBase schema
@@ -77,7 +86,7 @@ import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.ti
 public class PhoenixHBaseAccessor {
 
   private static final Log LOG = LogFactory.getLog(PhoenixHBaseAccessor.class);
-  private static final TimelineMetricReader timelineMetricReader = new TimelineMetricReader();
+  private static final TimelineMetricReadHelper TIMELINE_METRIC_READ_HELPER = new TimelineMetricReadHelper();
   private final Configuration hbaseConf;
   private final Configuration metricsConf;
   private final RetryCounterFactory retryCounterFactory;
@@ -151,14 +160,14 @@ public class PhoenixHBaseAccessor {
 
   private static TimelineMetric getLastTimelineMetricFromResultSet(ResultSet rs)
     throws SQLException, IOException {
-    TimelineMetric metric = timelineMetricReader
+    TimelineMetric metric = TIMELINE_METRIC_READ_HELPER
       .getTimelineMetricCommonsFromResultSet(rs);
     metric.setMetricValues(readLastMetricValueFromJSON(rs.getString("METRICS")));
 
     return metric;
   }
 
-  static TimelineMetric getAggregatedTimelineMetricFromResultSet(
+  public static TimelineMetric getAggregatedTimelineMetricFromResultSet(
     ResultSet rs, Function f) throws SQLException, IOException {
 
     TimelineMetric metric = new TimelineMetric();
@@ -214,7 +223,7 @@ public class PhoenixHBaseAccessor {
     return (Map<Long, Double>) mapper.readValue(json, metricValuesTypeRef);
   }
 
-  static TimelineMetric getTimelineMetricKeyFromResultSet(ResultSet rs)
+  public static TimelineMetric getTimelineMetricKeyFromResultSet(ResultSet rs)
     throws SQLException, IOException {
     TimelineMetric metric = new TimelineMetric();
     metric.setMetricName(rs.getString("METRIC_NAME"));
@@ -226,7 +235,7 @@ public class PhoenixHBaseAccessor {
     return metric;
   }
 
-  static MetricHostAggregate getMetricHostAggregateFromResultSet(ResultSet rs)
+  public static MetricHostAggregate getMetricHostAggregateFromResultSet(ResultSet rs)
     throws SQLException {
     MetricHostAggregate metricHostAggregate = new MetricHostAggregate();
     metricHostAggregate.setSum(rs.getDouble("METRIC_SUM"));
@@ -238,7 +247,7 @@ public class PhoenixHBaseAccessor {
     return metricHostAggregate;
   }
 
-  static MetricClusterAggregate getMetricClusterAggregateFromResultSet(ResultSet rs)
+  public static MetricClusterAggregate getMetricClusterAggregateFromResultSet(ResultSet rs)
     throws SQLException {
     MetricClusterAggregate agg = new MetricClusterAggregate();
     agg.setSum(rs.getDouble("METRIC_SUM"));
@@ -474,7 +483,7 @@ public class PhoenixHBaseAccessor {
     }
     else {
       TimelineMetric metric;
-      metric = timelineMetricReader.getTimelineMetricFromResultSet(rs);
+      metric = TIMELINE_METRIC_READ_HELPER.getTimelineMetricFromResultSet(rs);
 
       if (condition.isGrouped()) {
         metrics.addOrMergeTimelineMetric(metric);

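The visibility changes above (static -> public static) let the relocated
aggregators map result rows themselves; usage is along these lines (a
sketch, assuming an open ResultSet over one of the host aggregate tables):

    while (rs.next()) {
      TimelineMetric metric = PhoenixHBaseAccessor.getTimelineMetricKeyFromResultSet(rs);
      MetricHostAggregate hostAggregate = PhoenixHBaseAccessor.getMetricHostAggregateFromResultSet(rs);
      // fold the row into the next rollup, e.g. hourly from minute data
    }
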
http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixTransactSQL.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixTransactSQL.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixTransactSQL.java
deleted file mode 100644
index 2cdefa9..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixTransactSQL.java
+++ /dev/null
@@ -1,970 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.util.Collections;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Encapsulate all metrics related SQL queries.
- */
-public class PhoenixTransactSQL {
-
-  static final Log LOG = LogFactory.getLog(PhoenixTransactSQL.class);
-  /**
-   * Create table to store individual metric records.
-   */
-  public static final String CREATE_METRICS_TABLE_SQL = "CREATE TABLE IF NOT " +
-    "EXISTS METRIC_RECORD (METRIC_NAME VARCHAR, " +
-    "HOSTNAME VARCHAR, " +
-    "SERVER_TIME UNSIGNED_LONG NOT NULL, " +
-    "APP_ID VARCHAR, " +
-    "INSTANCE_ID VARCHAR, " +
-    "START_TIME UNSIGNED_LONG, " +
-    "UNITS CHAR(20), " +
-    "METRIC_SUM DOUBLE, " +
-    "METRIC_COUNT UNSIGNED_INT, " +
-    "METRIC_MAX DOUBLE, " +
-    "METRIC_MIN DOUBLE, " +
-    "METRICS VARCHAR CONSTRAINT pk " +
-    "PRIMARY KEY (METRIC_NAME, HOSTNAME, SERVER_TIME, APP_ID, " +
-    "INSTANCE_ID)) DATA_BLOCK_ENCODING='%s', IMMUTABLE_ROWS=true, " +
-    "TTL=%s, COMPRESSION='%s'";
-
-  public static final String CREATE_METRICS_AGGREGATE_HOURLY_TABLE_SQL =
-    "CREATE TABLE IF NOT EXISTS METRIC_RECORD_HOURLY " +
-      "(METRIC_NAME VARCHAR, " +
-      "HOSTNAME VARCHAR, " +
-      "APP_ID VARCHAR, " +
-      "INSTANCE_ID VARCHAR, " +
-      "SERVER_TIME UNSIGNED_LONG NOT NULL, " +
-      "UNITS CHAR(20), " +
-      "METRIC_SUM DOUBLE," +
-      "METRIC_COUNT UNSIGNED_INT, " +
-      "METRIC_MAX DOUBLE," +
-      "METRIC_MIN DOUBLE CONSTRAINT pk " +
-      "PRIMARY KEY (METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, " +
-      "SERVER_TIME)) DATA_BLOCK_ENCODING='%s', IMMUTABLE_ROWS=true, " +
-      "TTL=%s, COMPRESSION='%s'";
-
-  public static final String CREATE_METRICS_AGGREGATE_MINUTE_TABLE_SQL =
-    "CREATE TABLE IF NOT EXISTS METRIC_RECORD_MINUTE " +
-      "(METRIC_NAME VARCHAR, " +
-      "HOSTNAME VARCHAR, " +
-      "APP_ID VARCHAR, " +
-      "INSTANCE_ID VARCHAR, " +
-      "SERVER_TIME UNSIGNED_LONG NOT NULL, " +
-      "UNITS CHAR(20), " +
-      "METRIC_SUM DOUBLE," +
-      "METRIC_COUNT UNSIGNED_INT, " +
-      "METRIC_MAX DOUBLE," +
-      "METRIC_MIN DOUBLE CONSTRAINT pk " +
-      "PRIMARY KEY (METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, " +
-      "SERVER_TIME)) DATA_BLOCK_ENCODING='%s', IMMUTABLE_ROWS=true, TTL=%s," +
-      " COMPRESSION='%s'";
-
-  public static final String CREATE_METRICS_CLUSTER_AGGREGATE_TABLE_SQL =
-    "CREATE TABLE IF NOT EXISTS METRIC_AGGREGATE " +
-      "(METRIC_NAME VARCHAR, " +
-      "APP_ID VARCHAR, " +
-      "INSTANCE_ID VARCHAR, " +
-      "SERVER_TIME UNSIGNED_LONG NOT NULL, " +
-      "UNITS CHAR(20), " +
-      "METRIC_SUM DOUBLE, " +
-      "HOSTS_COUNT UNSIGNED_INT, " +
-      "METRIC_MAX DOUBLE, " +
-      "METRIC_MIN DOUBLE " +
-      "CONSTRAINT pk PRIMARY KEY (METRIC_NAME, APP_ID, INSTANCE_ID, " +
-      "SERVER_TIME)) DATA_BLOCK_ENCODING='%s', IMMUTABLE_ROWS=true, " +
-      "TTL=%s, COMPRESSION='%s'";
-
-  public static final String CREATE_METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_SQL =
-    "CREATE TABLE IF NOT EXISTS METRIC_AGGREGATE_HOURLY " +
-      "(METRIC_NAME VARCHAR, " +
-      "APP_ID VARCHAR, " +
-      "INSTANCE_ID VARCHAR, " +
-      "SERVER_TIME UNSIGNED_LONG NOT NULL, " +
-      "UNITS CHAR(20), " +
-      "METRIC_SUM DOUBLE, " +
-      "METRIC_COUNT UNSIGNED_INT, " +
-      "METRIC_MAX DOUBLE, " +
-      "METRIC_MIN DOUBLE " +
-      "CONSTRAINT pk PRIMARY KEY (METRIC_NAME, APP_ID, INSTANCE_ID, " +
-      "SERVER_TIME)) DATA_BLOCK_ENCODING='%s', IMMUTABLE_ROWS=true, " +
-      "TTL=%s, COMPRESSION='%s'";
-
-  /**
-   * ALTER table to set new options
-   */
-  public static final String ALTER_SQL = "ALTER TABLE %s SET TTL=%s";
-
-  /**
-   * Insert into metric records table.
-   */
-  public static final String UPSERT_METRICS_SQL = "UPSERT INTO %s " +
-    "(METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, SERVER_TIME, START_TIME, " +
-    "UNITS, " +
-    "METRIC_SUM, " +
-    "METRIC_MAX, " +
-    "METRIC_MIN, " +
-    "METRIC_COUNT, " +
-    "METRICS) VALUES " +
-    "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
-
-  public static final String UPSERT_CLUSTER_AGGREGATE_SQL = "UPSERT INTO " +
-    "METRIC_AGGREGATE (METRIC_NAME, APP_ID, INSTANCE_ID, SERVER_TIME, " +
-    "UNITS, " +
-    "METRIC_SUM, " +
-    "HOSTS_COUNT, " +
-    "METRIC_MAX, " +
-    "METRIC_MIN) " +
-    "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)";
-
-  public static final String UPSERT_CLUSTER_AGGREGATE_TIME_SQL = "UPSERT INTO" +
-    " %s (METRIC_NAME, APP_ID, INSTANCE_ID, SERVER_TIME, " +
-    "UNITS, " +
-    "METRIC_SUM, " +
-    "METRIC_COUNT, " +
-    "METRIC_MAX, " +
-    "METRIC_MIN) " +
-    "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)";
-
-
-  public static final String UPSERT_AGGREGATE_RECORD_SQL = "UPSERT INTO " +
-    "%s (METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, " +
-    "SERVER_TIME, " +
-    "UNITS, " +
-    "METRIC_SUM, " +
-    "METRIC_MAX, " +
-    "METRIC_MIN," +
-    "METRIC_COUNT) " +
-    "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
-
-  /**
-   * Retrieve a set of rows from metrics records table.
-   */
-  public static final String GET_METRIC_SQL = "SELECT %s METRIC_NAME, " +
-    "HOSTNAME, APP_ID, INSTANCE_ID, SERVER_TIME, START_TIME, UNITS, " +
-    "METRIC_SUM, " +
-    "METRIC_MAX, " +
-    "METRIC_MIN, " +
-    "METRIC_COUNT, " +
-    "METRICS " +
-    "FROM %s";
-
-  public static final String GET_METRIC_AGGREGATE_ONLY_SQL = "SELECT %s " +
-    "METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, SERVER_TIME, " +
-    "UNITS, " +
-    "METRIC_SUM, " +
-    "METRIC_MAX, " +
-    "METRIC_MIN, " +
-    "METRIC_COUNT " +
-    "FROM %s";
-
-  public static final String GET_CLUSTER_AGGREGATE_SQL = "SELECT %s " +
-    "METRIC_NAME, APP_ID, " +
-    "INSTANCE_ID, SERVER_TIME, " +
-    "UNITS, " +
-    "METRIC_SUM, " +
-    "HOSTS_COUNT, " +
-    "METRIC_MAX, " +
-    "METRIC_MIN " +
-    "FROM %s";
-
-  public static final String GET_CLUSTER_AGGREGATE_HOURLY_SQL = "SELECT %s " +
-      "METRIC_NAME, APP_ID, " +
-      "INSTANCE_ID, SERVER_TIME, " +
-      "UNITS, " +
-      "METRIC_SUM, " +
-      "METRIC_COUNT, " +
-      "METRIC_MAX, " +
-      "METRIC_MIN " +
-      "FROM %s";
-
-  public static final String METRICS_RECORD_TABLE_NAME = "METRIC_RECORD";
-  public static final String METRICS_AGGREGATE_MINUTE_TABLE_NAME =
-    "METRIC_RECORD_MINUTE";
-  public static final String METRICS_AGGREGATE_HOURLY_TABLE_NAME =
-    "METRIC_RECORD_HOURLY";
-  public static final String METRICS_CLUSTER_AGGREGATE_TABLE_NAME =
-    "METRIC_AGGREGATE";
-  public static final String METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME =
-    "METRIC_AGGREGATE_HOURLY";
-  public static final String DEFAULT_TABLE_COMPRESSION = "SNAPPY";
-  public static final String DEFAULT_ENCODING = "FAST_DIFF";
-  public static final long NATIVE_TIME_RANGE_DELTA = 120000; // 2 minutes
-  public static final long HOUR = 3600000; // 1 hour
-  public static final long DAY = 86400000; // 1 day
-
-  /**
-   * Filter to optimize HBase scan by using file timestamps. This prevents
-   * a full table scan of metric records.
-   *
-   * @return Phoenix Hint String
-   */
-  public static String getNaiveTimeRangeHint(Long startTime, Long delta) {
-    return String.format("/*+ NATIVE_TIME_RANGE(%s) */", (startTime - delta));
-  }
-
-  public static PreparedStatement prepareGetMetricsSqlStmt(
-    Connection connection, Condition condition) throws SQLException {
-
-    validateConditionIsNotEmpty(condition);
-    validateRowCountLimit(condition);
-
-    String stmtStr;
-    if (condition.getStatement() != null) {
-      stmtStr = condition.getStatement();
-    } else {
-
-      String metricsTable;
-      String query;
-      if (condition.getPrecision() == null) {
-        long endTime = condition.getEndTime() == null ? System.currentTimeMillis() : condition.getEndTime();
-        long startTime = condition.getStartTime() == null ? 0 : condition.getStartTime();
-        Long timeRange = endTime - startTime;
-        if (timeRange > 5 * DAY) {
-          metricsTable = METRICS_AGGREGATE_HOURLY_TABLE_NAME;
-          query = GET_METRIC_AGGREGATE_ONLY_SQL;
-          condition.setPrecision(Precision.HOURS);
-        } else if (timeRange > 10 * HOUR) {
-          metricsTable = METRICS_AGGREGATE_MINUTE_TABLE_NAME;
-          query = GET_METRIC_AGGREGATE_ONLY_SQL;
-          condition.setPrecision(Precision.MINUTES);
-        } else {
-          metricsTable = METRICS_RECORD_TABLE_NAME;
-          query = GET_METRIC_SQL;
-          condition.setPrecision(Precision.SECONDS);
-        }
-      } else {
-        switch (condition.getPrecision()) {
-          case HOURS:
-            metricsTable = METRICS_AGGREGATE_HOURLY_TABLE_NAME;
-            query = GET_METRIC_AGGREGATE_ONLY_SQL;
-            break;
-          case MINUTES:
-            metricsTable = METRICS_AGGREGATE_MINUTE_TABLE_NAME;
-            query = GET_METRIC_AGGREGATE_ONLY_SQL;
-            break;
-          default:
-            metricsTable = METRICS_RECORD_TABLE_NAME;
-            query = GET_METRIC_SQL;
-        }
-      }
-
-      stmtStr = String.format(query,
-        getNaiveTimeRangeHint(condition.getStartTime(), NATIVE_TIME_RANGE_DELTA),
-        metricsTable);
-    }
-
-    StringBuilder sb = new StringBuilder(stmtStr);
-    sb.append(" WHERE ");
-    sb.append(condition.getConditionClause());
-    String orderByClause = condition.getOrderByClause(true);
-
-    if (orderByClause != null) {
-      sb.append(orderByClause);
-    } else {
-      sb.append(" ORDER BY METRIC_NAME, SERVER_TIME ");
-    }
-    if (condition.getLimit() != null) {
-      sb.append(" LIMIT ").append(condition.getLimit());
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("SQL: " + sb.toString() + ", condition: " + condition);
-    }
-    PreparedStatement stmt = connection.prepareStatement(sb.toString());
-    int pos = 1;
-    if (condition.getMetricNames() != null) {
-      for (; pos <= condition.getMetricNames().size(); pos++) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Setting pos: " + pos + ", value = " + condition.getMetricNames().get(pos - 1));
-        }
-        stmt.setString(pos, condition.getMetricNames().get(pos - 1));
-      }
-    }
-    if (condition.getHostname() != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting pos: " + pos + ", value: " + condition.getHostname());
-      }
-      stmt.setString(pos++, condition.getHostname());
-    }
-    if (condition.getAppId() != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting pos: " + pos + ", value: " + condition.getAppId());
-      }
-      stmt.setString(pos++, condition.getAppId());
-    }
-    if (condition.getInstanceId() != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting pos: " + pos + ", value: " + condition.getInstanceId());
-      }
-      stmt.setString(pos++, condition.getInstanceId());
-    }
-    if (condition.getStartTime() != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting pos: " + pos + ", value: " + condition.getStartTime());
-      }
-      stmt.setLong(pos++, condition.getStartTime());
-    }
-    if (condition.getEndTime() != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting pos: " + pos + ", value: " + condition.getEndTime());
-      }
-      stmt.setLong(pos, condition.getEndTime());
-    }
-    if (condition.getFetchSize() != null) {
-      stmt.setFetchSize(condition.getFetchSize());
-    }
-
-    return stmt;
-  }
-
-  private static void validateConditionIsNotEmpty(Condition condition) {
-    if (condition.isEmpty()) {
-      throw new IllegalArgumentException("Condition is empty.");
-    }
-  }
-
-  private static void validateRowCountLimit(Condition condition) {
-    if (condition.getMetricNames() == null
-      || condition.getMetricNames().size() == 0) {
-      // aggregators may issue a query with no metric names
-      return;
-    }
-
-    long range = condition.getEndTime() - condition.getStartTime();
-    // for MINUTES and SECONDS precision, estimate one row per minute
-    long rowsPerMetric = TimeUnit.MILLISECONDS.toMinutes(range) + 1;
-
-    Precision precision = condition.getPrecision();
-    if (precision == Precision.HOURS) {
-      rowsPerMetric = TimeUnit.MILLISECONDS.toHours(range) + 1;
-    }
-
-    long totalRowsRequested = rowsPerMetric * condition.getMetricNames().size();
-    if (totalRowsRequested > PhoenixHBaseAccessor.RESULTSET_LIMIT) {
-      throw new IllegalArgumentException("The time range query for " +
-        "precision table exceeds row count limit, please query aggregate " +
-        "table instead.");
-    }
-  }
-
-  public static PreparedStatement prepareGetLatestMetricSqlStmt(
-    Connection connection, Condition condition) throws SQLException {
-
-    validateConditionIsNotEmpty(condition);
-
-    if (condition.getMetricNames() == null
-      || condition.getMetricNames().size() == 0) {
-      throw new IllegalArgumentException("Point in time query without " +
-        "metric names not supported ");
-    }
-
-    String stmtStr;
-    if (condition.getStatement() != null) {
-      stmtStr = condition.getStatement();
-    } else {
-      stmtStr = String.format(GET_METRIC_SQL,
-        "",
-        METRICS_RECORD_TABLE_NAME);
-    }
-
-    StringBuilder sb = new StringBuilder(stmtStr);
-    sb.append(" WHERE ");
-    sb.append(condition.getConditionClause());
-    String orderByClause = condition.getOrderByClause(false);
-    if (orderByClause != null) {
-      sb.append(orderByClause);
-    } else {
-      sb.append(" ORDER BY METRIC_NAME DESC, HOSTNAME DESC, SERVER_TIME DESC ");
-    }
-
-    sb.append(" LIMIT ").append(condition.getMetricNames().size());
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("SQL: " + sb.toString() + ", condition: " + condition);
-    }
-    PreparedStatement stmt = connection.prepareStatement(sb.toString());
-    int pos = 1;
-    if (condition.getMetricNames() != null) {
-      //IGNORE condition limit, set one based on number of metric names
-      for (; pos <= condition.getMetricNames().size(); pos++) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Setting pos: " + pos + ", value = " + condition.getMetricNames().get(pos - 1));
-        }
-        stmt.setString(pos, condition.getMetricNames().get(pos - 1));
-      }
-    }
-    if (condition.getHostname() != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting pos: " + pos + ", value: " + condition.getHostname());
-      }
-      stmt.setString(pos++, condition.getHostname());
-    }
-    if (condition.getAppId() != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting pos: " + pos + ", value: " + condition.getAppId());
-      }
-      stmt.setString(pos++, condition.getAppId());
-    }
-    if (condition.getInstanceId() != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting pos: " + pos + ", value: " + condition.getInstanceId());
-      }
-      stmt.setString(pos++, condition.getInstanceId());
-    }
-
-    if (condition.getFetchSize() != null) {
-      stmt.setFetchSize(condition.getFetchSize());
-    }
-
-    return stmt;
-  }
-
-  public static PreparedStatement prepareGetAggregateSqlStmt(
-    Connection connection, Condition condition) throws SQLException {
-
-    validateConditionIsNotEmpty(condition);
-
-    String metricsAggregateTable;
-    String queryStmt;
-    if (condition.getPrecision() == null) {
-      long endTime = condition.getEndTime() == null ? System.currentTimeMillis() : condition.getEndTime();
-      long startTime = condition.getStartTime() == null ? 0 : condition.getStartTime();
-      Long timeRange = endTime - startTime;
-      if (timeRange > 5 * DAY) {
-        metricsAggregateTable = METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME;
-        queryStmt = GET_CLUSTER_AGGREGATE_HOURLY_SQL;
-        condition.setPrecision(Precision.HOURS);
-      } else {
-        metricsAggregateTable = METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
-        queryStmt = GET_CLUSTER_AGGREGATE_SQL;
-        condition.setPrecision(Precision.SECONDS);
-      }
-    } else {
-      switch (condition.getPrecision()) {
-        case HOURS:
-          metricsAggregateTable = METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME;
-          queryStmt = GET_CLUSTER_AGGREGATE_HOURLY_SQL;
-          break;
-        default:
-          metricsAggregateTable = METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
-          queryStmt = GET_CLUSTER_AGGREGATE_SQL;
-      }
-    }
-
-    StringBuilder sb = new StringBuilder(queryStmt);
-    sb.append(" WHERE ");
-    sb.append(condition.getConditionClause());
-    sb.append(" ORDER BY METRIC_NAME, SERVER_TIME");
-    if (condition.getLimit() != null) {
-      sb.append(" LIMIT ").append(condition.getLimit());
-    }
-
-    String query = String.format(sb.toString(),
-      PhoenixTransactSQL.getNaiveTimeRangeHint(condition.getStartTime(),
-        NATIVE_TIME_RANGE_DELTA), metricsAggregateTable);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("SQL => " + query + ", condition => " + condition);
-    }
-    PreparedStatement stmt = connection.prepareStatement(query);
-    int pos = 1;
-    if (condition.getMetricNames() != null) {
-      for (; pos <= condition.getMetricNames().size(); pos++) {
-        stmt.setString(pos, condition.getMetricNames().get(pos - 1));
-      }
-    }
-    // TODO: Upper case all strings on POST
-    if (condition.getAppId() != null) {
-      stmt.setString(pos++, condition.getAppId());
-    }
-    if (condition.getInstanceId() != null) {
-      stmt.setString(pos++, condition.getInstanceId());
-    }
-    if (condition.getStartTime() != null) {
-      stmt.setLong(pos++, condition.getStartTime());
-    }
-    if (condition.getEndTime() != null) {
-      stmt.setLong(pos, condition.getEndTime());
-    }
-
-    return stmt;
-  }
-
-  public static PreparedStatement prepareGetLatestAggregateMetricSqlStmt(
-    Connection connection, Condition condition) throws SQLException {
-
-    validateConditionIsNotEmpty(condition);
-
-    String stmtStr;
-    if (condition.getStatement() != null) {
-      stmtStr = condition.getStatement();
-    } else {
-      stmtStr = String.format(GET_CLUSTER_AGGREGATE_SQL, "",
-          METRICS_CLUSTER_AGGREGATE_TABLE_NAME);
-    }
-
-    StringBuilder sb = new StringBuilder(stmtStr);
-    sb.append(" WHERE ");
-    sb.append(condition.getConditionClause());
-    String orderByClause = condition.getOrderByClause(false);
-    if (orderByClause != null) {
-      sb.append(orderByClause);
-    } else {
-      sb.append(" ORDER BY METRIC_NAME DESC, SERVER_TIME DESC  ");
-    }
-
-    sb.append(" LIMIT ").append(condition.getMetricNames().size());
-
-    String query = sb.toString();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("SQL: " + query + ", condition: " + condition);
-    }
-
-    PreparedStatement stmt = connection.prepareStatement(query);
-    int pos = 1;
-    if (condition.getMetricNames() != null) {
-      for (; pos <= condition.getMetricNames().size(); pos++) {
-        stmt.setString(pos, condition.getMetricNames().get(pos - 1));
-      }
-    }
-    if (condition.getAppId() != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting pos: " + pos + ", value: " + condition.getAppId());
-      }
-      stmt.setString(pos++, condition.getAppId());
-    }
-    if (condition.getInstanceId() != null) {
-      stmt.setString(pos++, condition.getInstanceId());
-    }
-
-    return stmt;
-  }
-
-  static interface Condition {
-
-    boolean isEmpty();
-
-    List<String> getMetricNames();
-    boolean isPointInTime();
-    boolean isGrouped();
-    void setStatement(String statement);
-    String getHostname();
-    Precision getPrecision();
-    void setPrecision(Precision precision);
-    String getAppId();
-    String getInstanceId();
-    StringBuilder getConditionClause();
-    String getOrderByClause(boolean asc);
-    String getStatement();
-    Long getStartTime();
-    Long getEndTime();
-    Integer getLimit();
-    Integer getFetchSize();
-    void setFetchSize(Integer fetchSize);
-    void addOrderByColumn(String column);
-    void setNoLimit();
-  }
-
-  static class DefaultCondition implements Condition {
-    List<String> metricNames;
-    String hostname;
-    String appId;
-    String instanceId;
-    Long startTime;
-    Long endTime;
-    Precision precision;
-    Integer limit;
-    boolean grouped;
-    boolean noLimit = false;
-    Integer fetchSize;
-    String statement;
-    Set<String> orderByColumns = new LinkedHashSet<String>();
-
-    DefaultCondition(List<String> metricNames, String hostname, String appId,
-              String instanceId, Long startTime, Long endTime, Precision precision,
-              Integer limit, boolean grouped) {
-      this.metricNames = metricNames;
-      this.hostname = hostname;
-      this.appId = appId;
-      this.instanceId = instanceId;
-      this.startTime = startTime;
-      this.endTime = endTime;
-      this.precision = precision;
-      this.limit = limit;
-      this.grouped = grouped;
-    }
-
-    public String getStatement() {
-      return statement;
-    }
-
-    public void setStatement(String statement) {
-      this.statement = statement;
-    }
-
-    public List<String> getMetricNames() {
-      return metricNames == null || metricNames.isEmpty() ? null : metricNames;
-    }
-
-    public StringBuilder getConditionClause() {
-      StringBuilder sb = new StringBuilder();
-      boolean appendConjunction = false;
-      StringBuilder metricsLike = new StringBuilder();
-      StringBuilder metricsIn = new StringBuilder();
-
-      if (getMetricNames() != null) {
-        for (String name : getMetricNames()) {
-          if (name.contains("%")) {
-            if (metricsLike.length() > 1) {
-              metricsLike.append(" OR ");
-            }
-            metricsLike.append("METRIC_NAME LIKE ?");
-          } else {
-            if (metricsIn.length() > 0) {
-              metricsIn.append(", ");
-            }
-            metricsIn.append("?");
-          }
-        }
-
-        if (metricsIn.length()>0) {
-          sb.append("(METRIC_NAME IN (");
-          sb.append(metricsIn);
-          sb.append(")");
-          appendConjunction = true;
-        }
-
-        if (metricsLike.length() > 0) {
-          if (appendConjunction) {
-            sb.append(" OR ");
-          } else {
-            sb.append("(");
-          }
-          sb.append(metricsLike);
-          appendConjunction = true;
-        }
-
-        if (appendConjunction) {
-          sb.append(")");
-        }
-      }
-
-      appendConjunction = append(sb, appendConjunction, getHostname(), " HOSTNAME = ?");
-      appendConjunction = append(sb, appendConjunction, getAppId(), " APP_ID = ?");
-      appendConjunction = append(sb, appendConjunction, getInstanceId(), " INSTANCE_ID = ?");
-      appendConjunction = append(sb, appendConjunction, getStartTime(), " SERVER_TIME >= ?");
-      append(sb, appendConjunction, getEndTime(), " SERVER_TIME < ?");
-
-      return sb;
-    }
-
-    protected static boolean append(StringBuilder sb,
-                                     boolean appendConjunction,
-                             Object value, String str) {
-      if (value != null) {
-        if (appendConjunction) {
-          sb.append(" AND");
-        }
-
-        sb.append(str);
-        appendConjunction = true;
-      }
-      return appendConjunction;
-    }
-
-    public String getHostname() {
-      return hostname == null || hostname.isEmpty() ? null : hostname;
-    }
-
-    public Precision getPrecision() {
-      return precision;
-    }
-
-    public void setPrecision(Precision precision) {
-      this.precision = precision;
-    }
-
-    public String getAppId() {
-      if (appId != null && !appId.isEmpty()) {
-        if (!(appId.equals("HOST") || appId.equals("FLUME_HANDLER"))) {
-          return appId.toLowerCase();
-        } else {
-          return appId;
-        }
-      }
-      return null;
-    }
-
-    public String getInstanceId() {
-      return instanceId == null || instanceId.isEmpty() ? null : instanceId;
-    }
-
-    /**
-     * Convert the start time to milliseconds if it was supplied in seconds.
-     */
-    public Long getStartTime() {
-      if (startTime == null) {
-        return null;
-      } else if (startTime < 9999999999L) {
-        return startTime * 1000;
-      } else {
-        return startTime;
-      }
-    }
-
-    public Long getEndTime() {
-      if (endTime == null) {
-        return null;
-      }
-      if (endTime < 9999999999L) {
-        return endTime * 1000;
-      } else {
-        return endTime;
-      }
-    }
-
-    public void setNoLimit() {
-      this.noLimit = true;
-    }
-
-    public Integer getLimit() {
-      if (noLimit) {
-        return null;
-      }
-      return limit == null ? PhoenixHBaseAccessor.RESULTSET_LIMIT : limit;
-    }
-
-    public boolean isGrouped() {
-      return grouped;
-    }
-
-    public boolean isPointInTime() {
-      return getStartTime() == null && getEndTime() == null;
-    }
-
-    public boolean isEmpty() {
-      return (metricNames == null || metricNames.isEmpty())
-        && (hostname == null || hostname.isEmpty())
-        && (appId == null || appId.isEmpty())
-        && (instanceId == null || instanceId.isEmpty())
-        && startTime == null
-        && endTime == null;
-    }
-
-    public Integer getFetchSize() {
-      return fetchSize;
-    }
-
-    public void setFetchSize(Integer fetchSize) {
-      this.fetchSize = fetchSize;
-    }
-
-    public void addOrderByColumn(String column) {
-      orderByColumns.add(column);
-    }
-
-    public String getOrderByClause(boolean asc) {
-      String orderByStr = " ORDER BY ";
-      if (!orderByColumns.isEmpty()) {
-        StringBuilder sb = new StringBuilder(orderByStr);
-        for (String orderByColumn : orderByColumns) {
-          if (sb.length() != orderByStr.length()) {
-            sb.append(", ");
-          }
-          sb.append(orderByColumn);
-          if (!asc) {
-            sb.append(" DESC");
-          }
-        }
-        sb.append(" ");
-        return sb.toString();
-      }
-      return null;
-    }
-
-    @Override
-    public String toString() {
-      return "Condition{" +
-        "metricNames=" + metricNames +
-        ", hostname='" + hostname + '\'' +
-        ", appId='" + appId + '\'' +
-        ", instanceId='" + instanceId + '\'' +
-        ", startTime=" + startTime +
-        ", endTime=" + endTime +
-        ", limit=" + limit +
-        ", grouped=" + grouped +
-        ", orderBy=" + orderByColumns +
-        ", noLimit=" + noLimit +
-        '}';
-    }
-  }
-
-  static class SplitByMetricNamesCondition implements Condition {
-    private final Condition adaptee;
-    private String currentMetric;
-
-    SplitByMetricNamesCondition(Condition condition){
-      this.adaptee = condition;
-    }
-
-    @Override
-    public boolean isEmpty() {
-      return adaptee.isEmpty();
-    }
-
-    @Override
-    public List<String> getMetricNames() {
-      return Collections.singletonList(currentMetric);
-    }
-
-    @Override
-    public boolean isPointInTime() {
-      return adaptee.isPointInTime();
-    }
-
-    @Override
-    public boolean isGrouped() {
-      return adaptee.isGrouped();
-    }
-
-    @Override
-    public void setStatement(String statement) {
-      adaptee.setStatement(statement);
-    }
-
-    @Override
-    public String getHostname() {
-      return adaptee.getHostname();
-    }
-
-    @Override
-    public Precision getPrecision() {
-      return adaptee.getPrecision();
-    }
-
-    @Override
-    public void setPrecision(Precision precision) {
-      adaptee.setPrecision(precision);
-    }
-
-    @Override
-    public String getAppId() {
-      return adaptee.getAppId();
-    }
-
-    @Override
-    public String getInstanceId() {
-      return adaptee.getInstanceId();
-    }
-
-    @Override
-    public StringBuilder getConditionClause() {
-      StringBuilder sb = new StringBuilder();
-      boolean appendConjunction = false;
-
-      if (getMetricNames() != null) {
-        for (String name : getMetricNames()) {
-          if (sb.length() > 1) {
-            sb.append(" OR ");
-          }
-          sb.append("METRIC_NAME = ?");
-        }
-
-        appendConjunction = true;
-      }
-
-      appendConjunction = DefaultCondition.append(sb, appendConjunction,
-        getHostname(), " HOSTNAME = ?");
-      appendConjunction = DefaultCondition.append(sb, appendConjunction,
-        getAppId(), " APP_ID = ?");
-      appendConjunction = DefaultCondition.append(sb, appendConjunction,
-        getInstanceId(), " INSTANCE_ID = ?");
-      appendConjunction = DefaultCondition.append(sb, appendConjunction,
-        getStartTime(), " SERVER_TIME >= ?");
-      DefaultCondition.append(sb, appendConjunction, getEndTime(),
-        " SERVER_TIME < ?");
-
-      return sb;
-    }
-
-    @Override
-    public String getOrderByClause(boolean asc) {
-      return adaptee.getOrderByClause(asc);
-    }
-
-    @Override
-    public String getStatement() {
-      return adaptee.getStatement();
-    }
-
-    @Override
-    public Long getStartTime() {
-      return adaptee.getStartTime();
-    }
-
-    @Override
-    public Long getEndTime() {
-      return adaptee.getEndTime();
-    }
-
-    @Override
-    public Integer getLimit() {
-      return adaptee.getLimit();
-    }
-
-    @Override
-    public Integer getFetchSize() {
-      return adaptee.getFetchSize();
-    }
-
-    @Override
-    public void setFetchSize(Integer fetchSize) {
-      adaptee.setFetchSize(fetchSize);
-    }
-
-    @Override
-    public void addOrderByColumn(String column) {
-      adaptee.addOrderByColumn(column);
-    }
-
-    @Override
-    public void setNoLimit() {
-      adaptee.setNoLimit();
-    }
-
-    public List<String> getOriginalMetricNames() {
-      return adaptee.getMetricNames();
-    }
-
-    public void setCurrentMetric(String currentMetric) {
-      this.currentMetric = currentMetric;
-    }
-  }
-}
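
For reference, the removed SplitByMetricNamesCondition wrapped a multi-metric
condition and replayed it one metric at a time, so getConditionClause() emitted
a single "METRIC_NAME = ?" predicate plus the optional host/app/time bounds on
each pass. A minimal, self-contained sketch of the clause shape it produced;
class and variable names here are illustrative, not Ambari's:

    import java.util.Arrays;
    import java.util.List;

    // Illustrative only: mirrors the WHERE clause built by the removed
    // SplitByMetricNamesCondition; values are bound as JDBC parameters.
    public class ConditionClauseSketch {
      public static void main(String[] args) {
        List<String> metricNames = Arrays.asList("cpu_user"); // singleton per split
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < metricNames.size(); i++) {
          if (sb.length() > 0) {
            sb.append(" OR ");
          }
          sb.append("METRIC_NAME = ?");
        }
        // Optional bounds, appended with AND conjunctions when present
        sb.append(" AND HOSTNAME = ?");
        sb.append(" AND APP_ID = ?");
        sb.append(" AND SERVER_TIME >= ?");
        sb.append(" AND SERVER_TIME < ?");
        System.out.println(sb);
      }
    }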

http://git-wip-us.apache.org/repos/asf/ambari/blob/b93452ed/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineClusterMetric.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineClusterMetric.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineClusterMetric.java
deleted file mode 100644
index d227993..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineClusterMetric.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
-
-public class TimelineClusterMetric {
-  private String metricName;
-  private String appId;
-  private String instanceId;
-  private long timestamp;
-  private String type;
-
-  TimelineClusterMetric(String metricName, String appId, String instanceId,
-                        long timestamp, String type) {
-    this.metricName = metricName;
-    this.appId = appId;
-    this.instanceId = instanceId;
-    this.timestamp = timestamp;
-    this.type = type;
-  }
-
-  String getMetricName() {
-    return metricName;
-  }
-
-  String getAppId() {
-    return appId;
-  }
-
-  String getInstanceId() {
-    return instanceId;
-  }
-
-  long getTimestamp() {
-    return timestamp;
-  }
-
-  String getType() { return type; }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    TimelineClusterMetric that = (TimelineClusterMetric) o;
-
-    if (timestamp != that.timestamp) return false;
-    if (appId != null ? !appId.equals(that.appId) : that.appId != null)
-      return false;
-    if (instanceId != null ? !instanceId.equals(that.instanceId) : that.instanceId != null)
-      return false;
-    if (!metricName.equals(that.metricName)) return false;
-
-    return true;
-  }
-
-  public boolean equalsExceptTime(TimelineClusterMetric metric) {
-    if (!metricName.equals(metric.metricName)) return false;
-    if (!appId.equals(metric.appId)) return false;
-    if (instanceId != null ? !instanceId.equals(metric.instanceId) : metric.instanceId != null)
-      return false;
-
-    return true;
-  }
-  @Override
-  public int hashCode() {
-    int result = metricName.hashCode();
-    result = 31 * result + (appId != null ? appId.hashCode() : 0);
-    result = 31 * result + (instanceId != null ? instanceId.hashCode() : 0);
-    result = 31 * result + (int) (timestamp ^ (timestamp >>> 32));
-    return result;
-  }
-
-  @Override
-  public String toString() {
-    return "TimelineClusterMetric{" +
-      "metricName='" + metricName + '\'' +
-      ", appId='" + appId + '\'' +
-      ", instanceId='" + instanceId + '\'' +
-      ", timestamp=" + timestamp +
-      '}';
-  }
-}


[3/7] ambari git commit: AMBARI-10290. Expose available host metrics across hostcomponents. Support HostComponent host metrics. (swagle)

Posted by sw...@apache.org.
AMBARI-10290. Expose available host metrics across hostcomponents. Support HostComponent host metrics. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/23faabc4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/23faabc4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/23faabc4

Branch: refs/heads/trunk
Commit: 23faabc4a9bcb6908314a4c5679b1d4045de8f53
Parents: b93452e
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Thu Apr 2 14:33:29 2015 -0700
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Thu Apr 2 14:55:29 2015 -0700

----------------------------------------------------------------------
 .../aggregators/AbstractTimelineAggregator.java |    4 +
 .../metrics/timeline/aggregators/Function.java  |    3 +-
 .../metrics/timeline/ITClusterAggregator.java   |    2 +-
 ambari-server/pom.xml                           |    5 +
 .../internal/AbstractPropertyProvider.java      |   13 +-
 .../controller/internal/PropertyInfo.java       |    9 +
 .../internal/StackDefinedPropertyProvider.java  |    6 +-
 .../ganglia/GangliaPropertyProvider.java        |    2 +-
 .../metrics/timeline/AMSPropertyProvider.java   |  161 +-
 .../controller/utilities/PropertyHelper.java    |   10 +
 .../ambari/server/state/stack/Metric.java       |   11 +-
 .../0.1.0/configuration/ams-site.xml            |    2 +-
 .../HBASE/0.96.0.2.0/metrics.json               |  429 +++--
 .../common-services/HDFS/2.1.0.2.0/metrics.json |  429 +++--
 .../STORM/0.9.1.2.1/metrics.json                | 1510 +++++++++---------
 .../common-services/YARN/2.1.0.2.0/metrics.json |  361 ++++-
 .../src/main/resources/ganglia_properties.json  |  268 ++--
 .../StackArtifactResourceProviderTest.java      |    6 +
 .../RestMetricsPropertyProviderTest.java        |   16 +-
 .../metrics/ganglia/TestStreamProvider.java     |    4 +-
 .../timeline/AMSPropertyProviderTest.java       |  106 +-
 .../ams/single_host_component_metrics.json      |   21 +
 .../test/resources/ams/single_host_metric.json  |  242 +--
 .../stacks/OTHER/1.0/services/HDFS/metrics.json |    3 +-
 24 files changed, 2327 insertions(+), 1296 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
index 8dea46e..07717a8 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
@@ -37,6 +37,10 @@ import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.AGGREGATOR_CHECKPOINT_DELAY;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.RESULTSET_FETCH_SIZE;
 
+/**
+ * Base class for all runnable aggregators. Provides common functions like
+ * checkpointing and scheduling.
+ */
 public abstract class AbstractTimelineAggregator implements Runnable {
   protected final PhoenixHBaseAccessor hBaseAccessor;
   private final Log LOG;
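
To make the pattern concrete: a minimal sketch, with invented names, of the
checkpoint-then-sleep loop such a runnable aggregator implements (the real
class persists its checkpoint and derives its schedule from configuration,
which this sketch does not attempt):

    // Illustrative only; not the actual Ambari class.
    public abstract class PollingAggregatorSketch implements Runnable {
      private final long sleepIntervalMillis;

      protected PollingAggregatorSketch(long sleepIntervalMillis) {
        this.sleepIntervalMillis = sleepIntervalMillis;
      }

      @Override
      public void run() {
        while (!Thread.currentThread().isInterrupted()) {
          long lastCheckpoint = readCheckpoint();   // resume where the last run ended
          long now = System.currentTimeMillis();
          aggregate(lastCheckpoint, now);           // one aggregation pass
          saveCheckpoint(now);                      // record progress for restarts
          try {
            Thread.sleep(sleepIntervalMillis);      // fixed-delay scheduling
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        }
      }

      protected abstract long readCheckpoint();
      protected abstract void saveCheckpoint(long timestamp);
      protected abstract void aggregate(long startTime, long endTime);
    }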

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/Function.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/Function.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/Function.java
index 9d17fca..8292657 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/Function.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/Function.java
@@ -31,8 +31,7 @@ public class Function {
   private ReadFunction readFunction = ReadFunction.VALUE;
   private PostProcessingFunction postProcessingFunction = null;
 
-  public Function(){
-
+  public Function() {
   }
 
   public Function(ReadFunction readFunction,

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java
index da3d703b..27e9d67 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java
@@ -419,7 +419,7 @@ public class ITClusterAggregator extends AbstractMiniHBaseClusterTest {
       currentHostAggregate = PhoenixHBaseAccessor.getMetricClusterAggregateFromResultSet(rs);
       recordCount++;
     }
-    Assert.assertEquals(4, recordCount);
+    assertEquals(4, recordCount);
     assertNotNull(currentMetric);
     assertEquals("cpu_user", currentMetric.getMetricName());
     assertEquals("app1", currentMetric.getAppId());

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 1e6be0c..896a202 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -1816,6 +1816,11 @@
       <artifactId>ambari-metrics-common</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-annotations</artifactId>
+      <version>2.1.4</version>
+    </dependency>
   </dependencies>
 
   <pluginRepositories>

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java
index 3d88dda..dba20ed 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java
@@ -21,9 +21,7 @@ package org.apache.ambari.server.controller.internal;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.text.DecimalFormat;
-import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -110,12 +108,12 @@ public abstract class AbstractPropertyProvider extends BaseProvider implements P
   protected Map<String, PropertyInfo> getPropertyInfoMap(String componentName, String propertyId) {
     Map<String, PropertyInfo> propertyInfoMap = new HashMap<String, PropertyInfo>();
 
-    getPropertyInfoMap(componentName, propertyId, propertyInfoMap);
+    updatePropertyInfoMap(componentName, propertyId, propertyInfoMap);
 
     return propertyInfoMap;
   }
 
-  protected void getPropertyInfoMap(String componentName, String propertyId, Map<String, PropertyInfo> propertyInfoMap) {
+  protected void updatePropertyInfoMap(String componentName, String propertyId, Map<String, PropertyInfo> propertyInfoMap) {
     Map<String, PropertyInfo> componentMetricMap = getComponentMetrics().get(componentName);
 
     propertyInfoMap.clear();
@@ -303,8 +301,11 @@ public abstract class AbstractPropertyProvider extends BaseProvider implements P
           regexGroup = regexGroup.replace("/", ".");
           key = key.replaceFirst(FIND_REGEX_IN_METRIC_REGEX, regexGroup);
         }
-        componentMetricMap.put(propertyId, new PropertyInfo(key,
-          propertyInfo.isTemporal(), propertyInfo.isPointInTime()));
+        PropertyInfo compPropertyInfo = new PropertyInfo(key,
+          propertyInfo.isTemporal(), propertyInfo.isPointInTime());
+        compPropertyInfo.setAmsHostMetric(propertyInfo.isAmsHostMetric());
+        compPropertyInfo.setAmsId(propertyInfo.getAmsId());
+        componentMetricMap.put(propertyId, compPropertyInfo);
       }
 
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PropertyInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PropertyInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PropertyInfo.java
index 6773bd5..ac1eb33 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PropertyInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PropertyInfo.java
@@ -26,6 +26,7 @@ public class PropertyInfo {
   private final boolean temporal;
   private final boolean pointInTime;
   private String amsId;
+  private boolean amsHostMetric;
 
   public PropertyInfo(String propertyId, boolean temporal, boolean pointInTime) {
     this.propertyId = propertyId;
@@ -52,4 +53,12 @@ public class PropertyInfo {
   public void setAmsId(String amsId) {
     this.amsId = amsId;
   }
+
+  public boolean isAmsHostMetric() {
+    return amsHostMetric;
+  }
+
+  public void setAmsHostMetric(boolean amsHostMetric) {
+    this.amsHostMetric = amsHostMetric;
+  }
 }
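
The new flag is a plain bean property alongside amsId; a minimal usage sketch
(the metric id is an arbitrary example):

    import org.apache.ambari.server.controller.internal.PropertyInfo;

    public class PropertyInfoFlagSketch {
      public static void main(String[] args) {
        // propertyId, temporal, pointInTime
        PropertyInfo info = new PropertyInfo("cpu_user", true, true);
        info.setAmsHostMetric(true); // mark as a host metric for AMS
        System.out.println(info.isAmsHostMetric()); // prints: true
      }
    }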

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java
index a18086b..497230d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java
@@ -248,8 +248,10 @@ public class StackDefinedPropertyProvider implements PropertyProvider {
 
     for (Entry<String, Metric> entry : def.getMetrics().entrySet()) {
       Metric metric = entry.getValue();
-      defs.put(entry.getKey(), new PropertyInfo(
-          metric.getName(), metric.isTemporal(), metric.isPointInTime()));
+      PropertyInfo propertyInfo = new PropertyInfo(metric.getName(),
+        metric.isTemporal(), metric.isPointInTime());
+      propertyInfo.setAmsHostMetric(metric.isAmsHostMetric());
+      defs.put(entry.getKey(), propertyInfo);
     }
 
     return defs;

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java
index e23f461..c07bc17 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java
@@ -202,7 +202,7 @@ public abstract class GangliaPropertyProvider extends MetricsPropertyProvider {
             updateComponentMetricMap(componentMetricMap, id);
           }
 
-          getPropertyInfoMap(getComponentName(resource), id, propertyInfoMap);
+          updatePropertyInfoMap(getComponentName(resource), id, propertyInfoMap);
 
           for (Map.Entry<String, PropertyInfo> entry : propertyInfoMap.entrySet()) {
             String propertyId = entry.getKey();

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java
index b566473..77ffbc1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java
@@ -27,6 +27,7 @@ import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.spi.TemporalInfo;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.controller.utilities.StreamProvider;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.http.client.utils.URIBuilder;
@@ -107,6 +108,9 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
     private final Map<String, Set<String>> metrics = new HashMap<String, Set<String>>();
     private final URIBuilder uriBuilder;
     private final String dummyHostName = "__SummaryInfo__";
+    // Metrics flagged amsHostMetric = true: host metrics that should be
+    // returned for a hostcomponent.
+    private final Set<String> hostComponentHostMetrics = new HashSet<String>();
 
     private MetricsRequest(TemporalInfo temporalInfo, URIBuilder uriBuilder) {
       this.temporalInfo = temporalInfo;
@@ -135,6 +139,51 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
       propertyIds.add(id);
     }
 
+    public void putHostComponentHostMetric(String metric) {
+      if (metric != null) {
+        hostComponentHostMetrics.add(metric);
+      }
+    }
+
+    private TimelineMetrics getTimelineMetricsForSpec(String spec) {
+      TimelineMetrics timelineMetrics = null;
+
+      LOG.debug("Metrics request url = " + spec);
+      BufferedReader reader = null;
+      try {
+        reader = new BufferedReader(new InputStreamReader(streamProvider.readFrom(spec)));
+        timelineMetrics = timelineObjectReader.readValue(reader);
+        LOG.debug("Timeline metrics response => " + timelineMetrics);
+
+      } catch (IOException io) {
+        String errorMsg = "Error getting timeline metrics.";
+        if (LOG.isDebugEnabled()) {
+          LOG.error(errorMsg, io);
+        } else {
+          if (io instanceof SocketTimeoutException) {
+            errorMsg += " Cannot connect to collector, socket error.";
+          }
+          LOG.error(errorMsg);
+        }
+      } finally {
+        if (reader != null) {
+          try {
+            reader.close();
+          } catch (IOException e) {
+            if (LOG.isWarnEnabled()) {
+              if (LOG.isDebugEnabled()) {
+                LOG.warn("Unable to close http input stream : spec=" + spec, e);
+              } else {
+                LOG.warn("Unable to close http input stream : spec=" + spec);
+              }
+            }
+          }
+        }
+      }
+
+      return timelineMetrics;
+    }
+
     /**
      * Populate the associated resources by making a call to the Metrics
      * service.
@@ -145,7 +194,7 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
     public Collection<Resource> populateResources() throws SystemException {
       // No open ended query support.
       if (temporalInfo != null && (temporalInfo.getStartTime() == null
-        || temporalInfo.getEndTime() == null)) {
+          || temporalInfo.getEndTime() == null)) {
         return Collections.emptySet();
       }
 
@@ -170,18 +219,28 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
             return Collections.emptySet();
           }
 
-          String spec = getSpec(hostname, resource);
-
-          BufferedReader reader = null;
-          try {
-            LOG.debug("Metrics request url =" + spec);
-            reader = new BufferedReader(new InputStreamReader(streamProvider.readFrom(spec)));
-
-            TimelineMetrics timelineMetrics = timelineObjectReader.readValue(reader);
-            LOG.debug("Timeline metrics response => " + timelineMetrics);
+          TimelineMetrics timelineMetrics;
+          // Allow for multiple requests since host metrics for a
+          // hostcomponent need the HOST appId
+          if (hostComponentHostMetrics.isEmpty()) {
+            String spec = getSpec(hostname, resource);
+            timelineMetrics = getTimelineMetricsForSpec(spec);
+          } else {
+            Set<String> specs = getSpecsForHostComponentMetrics(hostname, resource);
+            timelineMetrics = new TimelineMetrics();
+            for (String spec : specs) {
+              if (!StringUtils.isEmpty(spec)) {
+                TimelineMetrics metrics = getTimelineMetricsForSpec(spec);
+                if (metrics != null) {
+                  timelineMetrics.getMetrics().addAll(metrics.getMetrics());
+                }
+              }
+            }
+          }
 
-            Set<String> patterns = createPatterns(metrics.keySet());
+          Set<String> patterns = createPatterns(metrics.keySet());
 
+          if (timelineMetrics != null) {
             for (TimelineMetric metric : timelineMetrics.getMetrics()) {
               if (metric.getMetricName() != null
                 && metric.getMetricValues() != null
@@ -189,31 +248,6 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
                 populateResource(resource, metric);
               }
             }
-
-          } catch (IOException io) {
-            String errorMsg = "Error getting timeline metrics.";
-            if (LOG.isDebugEnabled()) {
-              LOG.error(errorMsg, io);
-            } else {
-              if (io instanceof SocketTimeoutException) {
-                errorMsg += " Can not connect to collector, socket error.";
-              }
-              LOG.error(errorMsg);
-            }
-          } finally {
-            if (reader != null) {
-              try {
-                reader.close();
-              } catch (IOException e) {
-                if (LOG.isWarnEnabled()) {
-                  if (LOG.isDebugEnabled()) {
-                    LOG.warn("Unable to close http input stream : spec=" + spec, e);
-                  } else {
-                    LOG.warn("Unable to close http input stream : spec=" + spec);
-                  }
-                }
-              }
-            }
           }
         }
       }
@@ -221,8 +255,33 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
       return Collections.emptySet();
     }
 
-    private String getSpec(String hostname, Resource resource) {
-      String metricsParam = getSetString(processRegexps(metrics.keySet()), -1);
+    /**
+     * Return separate specs for host component metrics and host component
+     * host metrics.
+     * @return the set of request URLs
+     */
+    private Set<String> getSpecsForHostComponentMetrics(String hostname, Resource resource) {
+      Set<String> nonHostComponentMetrics = new HashSet<String>(metrics.keySet());
+      nonHostComponentMetrics.removeAll(hostComponentHostMetrics);
+
+      Set<String> specs = new HashSet<String>();
+      if (!hostComponentHostMetrics.isEmpty()) {
+        String hostComponentHostMetricParams = getSetString(processRegexps(hostComponentHostMetrics), -1);
+        setQueryParams(resource, hostComponentHostMetricParams, hostname, "HOST");
+        specs.add(uriBuilder.toString());
+      }
+
+      if (!nonHostComponentMetrics.isEmpty()) {
+        String nonHostComponentHostMetricParams = getSetString(processRegexps(nonHostComponentMetrics), -1);
+        setQueryParams(resource, nonHostComponentHostMetricParams, hostname, null);
+        specs.add(uriBuilder.toString());
+      }
+
+      return specs;
+    }
+
+    private void setQueryParams(Resource resource, String metricsParam,
+                                String hostname, String appId) {
       // Reuse uriBuilder
       uriBuilder.removeQuery();
 
@@ -234,12 +293,16 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
         uriBuilder.setParameter("hostname", hostname);
       }
 
-      String componentName = getComponentName(resource);
-      if (componentName != null && !componentName.isEmpty()) {
-        if (TIMELINE_APPID_MAP.containsKey(componentName)) {
-          componentName = TIMELINE_APPID_MAP.get(componentName);
+      if (appId != null) {
+        uriBuilder.setParameter("appId", appId);
+      } else {
+        String componentName = getComponentName(resource);
+        if (componentName != null && !componentName.isEmpty()) {
+          if (TIMELINE_APPID_MAP.containsKey(componentName)) {
+            componentName = TIMELINE_APPID_MAP.get(componentName);
+          }
+          uriBuilder.setParameter("appId", componentName);
         }
-        uriBuilder.setParameter("appId", componentName);
       }
 
       if (temporalInfo != null) {
@@ -253,6 +316,12 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
           uriBuilder.setParameter("endTime", String.valueOf(endTime));
         }
       }
+    }
+
+    private String getSpec(String hostname, Resource resource) {
+      String metricsParam = getSetString(processRegexps(metrics.keySet()), -1);
+
+      setQueryParams(resource, metricsParam, hostname, null);
 
       return uriBuilder.toString();
     }
@@ -423,7 +492,7 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
           updateComponentMetricMap(componentMetricMap, id);
         }
 
-        getPropertyInfoMap(componentName, id, propertyInfoMap);
+        updatePropertyInfoMap(componentName, id, propertyInfoMap);
 
         for (Map.Entry<String, PropertyInfo> entry : propertyInfoMap.entrySet()) {
           String propertyId = entry.getKey();
@@ -443,6 +512,10 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
             }
             metricsRequest.putResource(getHostName(resource), resource);
             metricsRequest.putPropertyId(propertyInfo.getPropertyId(), propertyId);
+            // If the request includes a host metric, we need to issue multiple requests
+            if (propertyInfo.isAmsHostMetric()) {
+              metricsRequest.putHostComponentHostMetric(propertyInfo.getPropertyId());
+            }
           }
         }
       }
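
Putting the pieces together: when a hostcomponent's requested metrics mix host
metrics (amsHostMetric = true) and regular component metrics, the provider now
issues one request per appId instead of a single combined one. A self-contained
sketch of the two specs that result; the collector endpoint, hostname, and
metric names below are illustrative assumptions, not values from this commit:

    import org.apache.http.client.utils.URIBuilder;

    import java.net.URISyntaxException;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class HostComponentSpecSketch {
      public static void main(String[] args) throws URISyntaxException {
        // Metrics requested for a DATANODE; cpu_user is flagged
        // amsHostMetric = true in metrics.json, so it needs appId=HOST.
        Set<String> requested = new HashSet<String>(
            Arrays.asList("cpu_user", "dfs.datanode.HeartbeatsAvgTime"));
        Set<String> hostMetrics = new HashSet<String>(Arrays.asList("cpu_user"));

        Set<String> componentMetrics = new HashSet<String>(requested);
        componentMetrics.removeAll(hostMetrics);

        // One spec per appId, mirroring getSpecsForHostComponentMetrics()
        System.out.println(buildSpec(hostMetrics, "HOST"));
        System.out.println(buildSpec(componentMetrics, "datanode"));
      }

      private static String buildSpec(Set<String> metricNames, String appId)
          throws URISyntaxException {
        // Assumed endpoint; the provider resolves the real collector URI at runtime
        URIBuilder uri = new URIBuilder(
            "http://collector.example.com:6188/ws/v1/timeline/metrics");
        StringBuilder names = new StringBuilder();
        for (String name : metricNames) {
          if (names.length() > 0) {
            names.append(',');
          }
          names.append(name);
        }
        uri.setParameter("metricNames", names.toString());
        uri.setParameter("hostname", "c6401.ambari.apache.org");
        uri.setParameter("appId", appId);
        return uri.build().toString();
      }
    }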

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
index c3d8e2f..cce4909 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
@@ -433,6 +433,7 @@ public class PropertyHelper {
             PropertyInfo propertyInfo = new PropertyInfo(metric.getMetric(),
               metric.isTemporal(), metric.isPointInTime());
             propertyInfo.setAmsId(metric.getAmsId());
+            propertyInfo.setAmsHostMetric(metric.isAmsHostMetric());
             metrics.put(property, propertyInfo);
           }
           componentMetrics.put(componentEntry.getKey(), metrics);
@@ -487,6 +488,7 @@ public class PropertyHelper {
     private boolean pointInTime;
     private boolean temporal;
     private String amsId;
+    private boolean amsHostMetric;
 
     private Metric() {
     }
@@ -528,5 +530,13 @@ public class PropertyHelper {
     public void setAmsId(String amsId) {
       this.amsId = amsId;
     }
+
+    public boolean isAmsHostMetric() {
+      return amsHostMetric;
+    }
+
+    public void setAmsHostMetric(boolean amsHostMetric) {
+      this.amsHostMetric = amsHostMetric;
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/main/java/org/apache/ambari/server/state/stack/Metric.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/Metric.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/Metric.java
index c82a70a..4debef5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/Metric.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/Metric.java
@@ -21,14 +21,17 @@ public class Metric {
   private String metric = null;
   private boolean pointInTime = false;
   private boolean temporal = false;
+  private boolean amsHostMetric = false;
 
   public Metric() {
   }
 
-  public Metric(String metric, boolean pointInTime, boolean temporal) {
+  public Metric(String metric, boolean pointInTime, boolean temporal,
+                boolean amsHostMetric) {
     this.metric = metric;
     this.pointInTime = pointInTime;
     this.temporal = temporal;
+    this.amsHostMetric = amsHostMetric;
   }
 
   public String getName() {
@@ -43,4 +46,10 @@ public class Metric {
     return temporal;
   }
 
+  /**
+   * Indicates whether this hostcomponent metric is a host metric for AMS.
+   */
+  public boolean isAmsHostMetric() {
+    return amsHostMetric;
+  }
 }
\ No newline at end of file
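
With the widened constructor, stack code can declare a host metric in one step;
a minimal sketch (argument values chosen for illustration):

    import org.apache.ambari.server.state.stack.Metric;

    public class MetricFlagSketch {
      public static void main(String[] args) {
        // metric name, pointInTime, temporal, amsHostMetric
        Metric metric = new Metric("cpu_user", true, true, true);
        System.out.println(metric.isAmsHostMetric()); // prints: true
      }
    }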

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
index 99760eb..74c4304 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
@@ -250,7 +250,7 @@
   </property>
   <property>
     <name>timeline.metrics.service.cluster.aggregator.appIds</name>
-    <value>datanode,nodemanager,hbase,nimbus</value>
+    <value>datanode,nodemanager,hbase</value>
     <description>
       List of application ids to use for aggregating host level metrics for
       an application. Example: bytes_read across Yarn Nodemanagers.

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metrics.json b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metrics.json
index 1c25729..7bbf7be 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metrics.json
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metrics.json
@@ -5,6 +5,151 @@
         "type": "ganglia",
         "metrics": {
           "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/process/proc_total":{
+              "metric":"proc_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_count":{
+              "metric":"read_count",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_count":{
+              "metric":"write_count",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_bytes":{
+              "metric":"read_bytes",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_bytes":{
+              "metric":"write_bytes",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_time":{
+              "metric":"read_time",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_time":{
+              "metric":"write_time",
+              "pointInTime":true,
+              "temporal":true
+            },
             "metrics/hbase/regionserver/compactionTime_avg_time": {
               "metric": "hbase.regionserver.compactionTime_avg_time",
               "pointInTime": true,
@@ -40,11 +185,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/load/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/getClosestRowBefore_num_ops": {
               "metric": "rpc.rpc.getClosestRowBefore_num_ops",
               "pointInTime": true,
@@ -70,11 +210,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/stopMaster_num_ops": {
               "metric": "rpc.rpc.stopMaster_num_ops",
               "pointInTime": true,
@@ -90,11 +225,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/disk/part_max_used": {
               "metric": "part_max_used",
               "pointInTime": true,
@@ -170,11 +300,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/removeFromOnlineRegions_num_ops": {
               "metric": "rpc.rpc.removeFromOnlineRegions_num_ops",
               "pointInTime": true,
@@ -225,11 +350,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/jvm/memHeapCommittedM": {
               "metric": "jvm.JvmMetrics.MemHeapCommittedM",
               "pointInTime": true,
@@ -355,16 +475,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/hbase/regionserver/fsReadLatencyHistogram_75th_percentile": {
               "metric": "hbase.regionserver.fsReadLatencyHistogram_75th_percentile",
               "pointInTime": true,
@@ -740,11 +850,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/close_num_ops": {
               "metric": "rpc.rpc.close_num_ops",
               "pointInTime": true,
@@ -940,11 +1045,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/disableTable_num_ops": {
               "metric": "rpc.rpc.disableTable_num_ops",
               "pointInTime": true,
@@ -980,11 +1080,6 @@
               "pointInTime": false,
               "temporal": true
             },
-            "metrics/load/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops": {
               "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
               "pointInTime": true,
@@ -1570,11 +1665,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/getConfiguration_num_ops": {
               "metric": "rpc.rpc.getConfiguration_num_ops",
               "pointInTime": true,
@@ -2226,6 +2316,180 @@
         "type": "ganglia",
         "metrics": {
           "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_total":{
+              "metric":"proc_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_count":{
+              "metric":"read_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_count":{
+              "metric":"write_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_bytes":{
+              "metric":"read_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_bytes":{
+              "metric":"write_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_time":{
+              "metric":"read_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_time":{
+              "metric":"write_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
             "metrics/hbase/regionserver/compactionTime_avg_time": {
               "metric": "hbase.regionserver.compactionTime_avg_time",
               "pointInTime": true,
@@ -2286,11 +2550,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/load/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/getClosestRowBefore_num_ops": {
               "metric": "rpc.rpc.getClosestRowBefore_num_ops",
               "pointInTime": true,
@@ -2336,11 +2595,6 @@
               "pointInTime": false,
               "temporal": true
             },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/multi_avg_time": {
               "metric": "rpc.rpc.multi_avg_time",
               "pointInTime": true,
@@ -2366,11 +2620,6 @@
               "pointInTime": false,
               "temporal": true
             },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/balance_avg_time": {
               "metric": "rpc.rpc.balance_avg_time",
               "pointInTime": true,
@@ -2501,11 +2750,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/jvm/memNonHeapCommittedM": {
               "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
               "pointInTime": true,
@@ -2616,11 +2860,6 @@
               "pointInTime": false,
               "temporal": true
             },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/isStopped/aboveOneSec/_num_ops": {
               "metric": "rpc.rpc.isStopped.aboveOneSec._num_ops",
               "pointInTime": true,
@@ -2826,11 +3065,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time": {
               "metric": "rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
               "pointInTime": true,
@@ -2846,11 +3080,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/load/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/hbase/regionserver/putRequestLatency_max": {
               "metric": "regionserver.Server.Mutate_max",
               "pointInTime": false,
@@ -3211,11 +3440,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/hbase/regionserver/blockCacheSize": {
               "metric": "regionserver.Server.blockCacheSize",
               "pointInTime": false,
@@ -3391,11 +3615,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/getFromOnlineRegions_num_ops": {
               "metric": "rpc.rpc.getFromOnlineRegions_num_ops",
               "pointInTime": true,
@@ -3756,11 +3975,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/shutdown_avg_time": {
               "metric": "rpc.rpc.shutdown_avg_time",
               "pointInTime": true,
@@ -3846,11 +4060,6 @@
               "pointInTime": false,
               "temporal": true
             },
-            "metrics/load/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/jvm/logInfo": {
               "metric": "jvm.JvmMetrics.LogInfo",
               "pointInTime": true,

http://git-wip-us.apache.org/repos/asf/ambari/blob/23faabc4/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json
index 702bda1..6fc808e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json
@@ -2634,6 +2634,151 @@
         "type": "ganglia",
         "metrics": {
           "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/process/proc_total":{
+              "metric":"proc_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_count":{
+              "metric":"read_count",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_count":{
+              "metric":"write_count",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_bytes":{
+              "metric":"read_bytes",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_bytes":{
+              "metric":"write_bytes",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_time":{
+              "metric":"read_time",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_time":{
+              "metric":"write_time",
+              "pointInTime":true,
+              "temporal":true
+            },
             "metrics/dfs/datanode/heartBeats_avg_time": {
               "metric": "dfs.datanode.HeartbeatsAvgTime",
               "pointInTime": true,
@@ -2684,11 +2829,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/load/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/dfs/datanode/writes_from_remote_client": {
               "metric": "dfs.datanode.WritesFromRemoteClient",
               "pointInTime": true,
@@ -2734,11 +2874,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/multi_avg_time": {
               "metric": "rpc.rpc.multi_avg_time",
               "pointInTime": true,
@@ -2764,11 +2899,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/splitRegion_num_ops": {
               "metric": "rpc.rpc.splitRegion_num_ops",
               "pointInTime": true,
@@ -2874,11 +3004,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/jvm/memNonHeapCommittedM": {
               "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
               "pointInTime": true,
@@ -3004,11 +3129,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/isStopped/aboveOneSec/_num_ops": {
               "metric": "rpc.rpc.isStopped.aboveOneSec._num_ops",
               "pointInTime": true,
@@ -3209,11 +3329,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time": {
               "metric": "rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
               "pointInTime": true,
@@ -3224,11 +3339,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/load/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/dfs/datanode/blocks_read": {
               "metric": "dfs.datanode.BlocksRead",
               "pointInTime": true,
@@ -3529,11 +3639,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/close_avg_time": {
               "metric": "rpc.rpc.close_avg_time",
               "pointInTime": true,
@@ -3659,11 +3764,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/getFromOnlineRegions_num_ops": {
               "metric": "rpc.rpc.getFromOnlineRegions_num_ops",
               "pointInTime": true,
@@ -4014,11 +4114,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/shutdown_avg_time": {
               "metric": "rpc.rpc.shutdown_avg_time",
               "pointInTime": true,
@@ -4079,11 +4174,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/load/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/getHServerInfo_avg_time": {
               "metric": "rpc.rpc.getHServerInfo_avg_time",
               "pointInTime": true,
@@ -4208,6 +4298,180 @@
         "type": "ganglia",
         "metrics": {
           "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_total":{
+              "metric":"proc_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_count":{
+              "metric":"read_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_count":{
+              "metric":"write_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_bytes":{
+              "metric":"read_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_bytes":{
+              "metric":"write_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_time":{
+              "metric":"read_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_time":{
+              "metric":"write_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
             "metrics/dfs/datanode/heartBeats_avg_time": {
               "metric": "dfs.datanode.HeartbeatsAvgTime",
               "pointInTime": true,
@@ -4253,11 +4517,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/load/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/dfs/datanode/writes_from_remote_client": {
               "metric": "dfs.datanode.WritesFromRemoteClient",
               "pointInTime": true,
@@ -4298,11 +4557,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/multi_avg_time": {
               "metric": "rpc.rpc.multi_avg_time",
               "pointInTime": true,
@@ -4323,11 +4577,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/balance_avg_time": {
               "metric": "rpc.rpc.balance_avg_time",
               "pointInTime": true,
@@ -4438,11 +4687,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/jvm/memNonHeapCommittedM": {
               "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
               "pointInTime": false,
@@ -4553,11 +4797,6 @@
               "pointInTime": false,
               "temporal": true
             },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/isStopped/aboveOneSec/_num_ops": {
               "metric": "rpc.rpc.isStopped.aboveOneSec._num_ops",
               "pointInTime": true,
@@ -4743,11 +4982,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time": {
               "metric": "rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
               "pointInTime": true,
@@ -4758,11 +4992,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/load/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/dfs/datanode/blocks_read": {
               "metric": "dfs.datanode.BlocksRead",
               "pointInTime": true,
@@ -5053,11 +5282,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/close_avg_time": {
               "metric": "rpc.rpc.close_avg_time",
               "pointInTime": true,
@@ -5178,11 +5402,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/getFromOnlineRegions_num_ops": {
               "metric": "rpc.rpc.getFromOnlineRegions_num_ops",
               "pointInTime": true,
@@ -5503,11 +5722,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/shutdown_avg_time": {
               "metric": "rpc.rpc.shutdown_avg_time",
               "pointInTime": true,
@@ -5568,11 +5782,6 @@
               "pointInTime": true,
               "temporal": true
             },
-            "metrics/load/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true
-            },
             "metrics/rpc/getHServerInfo_avg_time": {
               "metric": "rpc.rpc.getHServerInfo_avg_time",
               "pointInTime": true,