Posted to commits@ambari.apache.org by av...@apache.org on 2018/05/11 14:06:13 UTC

[ambari] branch trunk updated (2e47da7 -> 4b8a6b2)

This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git.


    from 2e47da7  Merge pull request #1246 from hiveww/AMBARI-23826-trunk
     new 7d215e0  Revert "AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work (Refactor Split point computation. Allow only Murmur3Hash)."
     new dd24931  Revert "AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work (Pass hbase total heapsize to collector)."
     new 4b8a6b2  Revert "AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work."

The 3 revisions listed above as "new" are entirely new to this
repository and are described in the separate messages below.  Revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../core/timeline/HBaseTimelineMetricsService.java |  15 +-
 .../core/timeline/PhoenixHBaseAccessor.java        | 227 +++++++++++--------
 .../core/timeline/TimelineMetricConfiguration.java |  47 +++-
 .../timeline/TimelineMetricSplitPointComputer.java | 240 ---------------------
 .../core/timeline/TimelineMetricStoreWatcher.java  |   6 +-
 .../discovery/TimelineMetricMetadataManager.java   | 120 +++--------
 .../core/timeline/query/PhoenixTransactSQL.java    |  20 +-
 .../timeline/uuid/Murmur3HashUuidGenStrategy.java  |  54 -----
 .../core/timeline/uuid/TimelineMetricUuid.java     |  55 -----
 .../ambari/metrics/webapp/TimelineWebServices.java |  16 +-
 .../resources/metrics_def/AMSSMOKETESTFAKE.dat     |   1 -
 .../{HBASE_MASTER.dat => MASTER_HBASE.dat}         |   0
 .../{HBASE_REGIONSERVER.dat => SLAVE_HBASE.dat}    |   0
 .../metrics_def/TIMELINE_METRIC_STORE_WATCHER.dat  |   1 -
 .../timeline/AbstractMiniHBaseClusterTest.java     |   9 +-
 .../core/timeline/ITPhoenixHBaseAccessor.java      |  39 ++--
 .../core/timeline/PhoenixHBaseAccessorTest.java    |   4 +-
 .../TimelineMetricSplitPointComputerTest.java      | 141 ------------
 .../timeline/TimelineMetricStoreWatcherTest.java   |   8 +-
 .../timeline/discovery/TestMetadataManager.java    |   2 +-
 ...est.java => TimelineMetricUuidManagerTest.java} | 132 ++++++------
 .../ambari/server/upgrade/UpgradeCatalog270.java   |  19 +-
 .../0.1.0/configuration/ams-site.xml               |  54 +++++
 .../0.1.0/package/files/service-metrics/KAFKA.txt  |   0
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py    |  32 +--
 .../AMBARI_METRICS/0.1.0/package/scripts/params.py |   1 -
 .../AMBARI_METRICS/0.1.0/service_advisor.py        |  22 ++
 .../server/upgrade/UpgradeCatalog270Test.java      |   4 -
 .../AMBARI_METRICS/test_service_advisor.py         |   4 +
 .../2.0.6/AMBARI_METRICS/test_metrics_collector.py |   2 +-
 .../test/python/stacks/2.0.6/configs/default.json  |   5 +-
 .../stacks/2.0.6/configs/default_ams_embedded.json |   4 +-
 32 files changed, 406 insertions(+), 878 deletions(-)
 delete mode 100644 ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java
 delete mode 100644 ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java
 delete mode 100644 ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuid.java
 delete mode 100644 ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/AMSSMOKETESTFAKE.dat
 rename ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/{HBASE_MASTER.dat => MASTER_HBASE.dat} (100%)
 rename ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/{HBASE_REGIONSERVER.dat => SLAVE_HBASE.dat} (100%)
 delete mode 100644 ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/TIMELINE_METRIC_STORE_WATCHER.dat
 delete mode 100644 ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java
 rename ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/{MetricUuidGenStrategyTest.java => TimelineMetricUuidManagerTest.java} (59%)
 mode change 100644 => 100755 ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/service-metrics/KAFKA.txt

-- 
To stop receiving notification emails like this one, please contact
avijayan@apache.org.

[ambari] 02/03: Revert "AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work (Pass hbase total heapsize to collector)."

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit dd249318ee0373117d8e1e8dee4543847d5d4d52
Author: Aravindan Vijayan <av...@hortonworks.com>
AuthorDate: Fri May 11 07:03:41 2018 -0700

    Revert "AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work (Pass hbase total heapsize to collector)."
    
    This reverts commit 5bae5c66268591f999e639ca32a50931d803f9f2.
---
 .../common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py       | 4 ----
 ambari-server/src/test/python/stacks/2.0.6/configs/default.json       | 3 +--
 .../src/test/python/stacks/2.0.6/configs/default_ams_embedded.json    | 3 +--
 3 files changed, 2 insertions(+), 8 deletions(-)

diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
index fc57ffc..d0ee66e 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
@@ -252,10 +252,6 @@ def ams(name=None, action=None):
           master_components.append('ambari_server')
         new_ams_site['timeline.metrics.initial.configured.master.components'] = ",".join(master_components)
 
-    hbase_total_heapsize_with_trailing_m = params.hbase_heapsize
-    hbase_total_heapsize = int(hbase_total_heapsize_with_trailing_m[:-1]) * 1024 * 1024
-    new_ams_site['hbase_total_heapsize'] = hbase_total_heapsize
-
     XmlConfig("ams-site.xml",
               conf_dir=params.ams_collector_conf_dir,
               configurations=new_ams_site,
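
For context, the two reverted lines above converted the configured HBase
heap size (a string such as "512m") into bytes before publishing it to
ams-site. A minimal Java sketch of the same conversion, assuming the value
always carries a trailing "m" (megabytes) as ams.py did; the class and
method names are illustrative only:

    public final class HeapsizeSketch {
      // Mirrors the reverted Python: strip the trailing "m", scale MB to bytes.
      static long heapsizeToBytes(String heapsizeWithTrailingM) {
        long megabytes = Long.parseLong(
            heapsizeWithTrailingM.substring(0, heapsizeWithTrailingM.length() - 1));
        return megabytes * 1024L * 1024L;
      }

      public static void main(String[] args) {
        // "512m" -> 536870912, the hbase_total_heapsize value removed from
        // default.json in the next hunk.
        System.out.println(heapsizeToBytes("512m"));
      }
    }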
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 98fc30d..69bd8d8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -1078,8 +1078,7 @@
             "timeline.metrics.cluster.aggregator.minute.interval": "120",
             "timeline.metrics.host.aggregator.hourly.interval": "3600",
             "timeline.metrics.initial.configured.master.components": "drpc_server,hive_server,resourcemanager,all,webhcat_server,snamenode,storm_ui_server,falcon_server,namenode,ganglia_server,logviewer_server,hive_metastore,nimbus,zookeeper_server,historyserver,hbase_master,oozie_server,metrics_collector,supervisor,ganglia_monitor,hive_mysql,ambari_server",
-            "timeline.metrics.initial.configured.slave.components": "hbase_regionserver,datanode,nodemanager",
-            "hbase_total_heapsize": 536870912
+            "timeline.metrics.initial.configured.slave.components": "hbase_regionserver,datanode,nodemanager"
         },
         "ams-grafana-env": {
             "metrics_grafana_log_dir": "/var/log/ambari-metrics-grafana",
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
index a802b74..92cd24a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
@@ -995,8 +995,7 @@
             "timeline.metrics.daily.aggregator.minute.interval": "86400",
             "timeline.metrics.cluster.aggregator.minute.interval": "120",
             "timeline.metrics.host.aggregator.hourly.interval": "3600",
-            "timeline.metrics.initial.configured.master.components": "snamenode,nm,drpc_server,hive_server,resourcemanager,all,slave,webhcat_server,ganglia_server,storm_ui_server,falcon_server,hs,hive_metastore,logviewer_server,nimbus,zookeeper_server,hbase_rs,namenode,hbase_master,metrics_collector,ambari_server,supervisor,ganglia_monitor,hive_mysql",
-            "hbase_total_heapsize": 1073741824
+            "timeline.metrics.initial.configured.master.components": "snamenode,nm,drpc_server,hive_server,resourcemanager,all,slave,webhcat_server,ganglia_server,storm_ui_server,falcon_server,hs,hive_metastore,logviewer_server,nimbus,zookeeper_server,hbase_rs,namenode,hbase_master,metrics_collector,ambari_server,supervisor,ganglia_monitor,hive_mysql"
         }
     },
     "configurationAttributes": {


[ambari] 01/03: Revert "AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work (Refactor Split point computation. Allow only Murmur3Hash)."

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 7d215e0bdc519f450a127b6adbf11438c23136ad
Author: Aravindan Vijayan <av...@hortonworks.com>
AuthorDate: Fri May 11 07:03:29 2018 -0700

    Revert "AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work (Refactor Split point computation. Allow only Murmur3Hash)."
    
    This reverts commit d4d2767ae0d9c8e3043144df52bd5d6094b61171.
---
 .../metrics/core/timeline/PhoenixHBaseAccessor.java |  1 -
 .../timeline/TimelineMetricSplitPointComputer.java  | 19 +++++++++----------
 .../discovery/TimelineMetricMetadataManager.java    | 20 +++++++++++++++++++-
 .../timeline/uuid/Murmur3HashUuidGenStrategy.java   | 13 +------------
 .../TimelineMetricSplitPointComputerTest.java       | 21 +++++++++------------
 5 files changed, 38 insertions(+), 36 deletions(-)

diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java
index c684b0a..dec7850 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java
@@ -492,7 +492,6 @@ public class PhoenixHBaseAccessor {
     PreparedStatement pStmt = null;
     TimelineMetricSplitPointComputer splitPointComputer = new TimelineMetricSplitPointComputer(
       metricsConf, hbaseConf, metadataManagerInstance);
-    splitPointComputer.computeSplitPoints();
 
     String encoding = metricsConf.get(HBASE_ENCODING_SCHEME, DEFAULT_ENCODING);
     String compression = metricsConf.get(HBASE_COMPRESSION_SCHEME, DEFAULT_TABLE_COMPRESSION);
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java
index 05366cc..89bb843 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java
@@ -49,11 +49,6 @@ public class TimelineMetricSplitPointComputer {
   private static final int SLAVE_EQUIDISTANT_POINTS = 50;
   private static final int MASTER_EQUIDISTANT_POINTS = 5;
 
-  private double hbaseTotalHeapsize;
-  private double hbaseMemstoreUpperLimit;
-  private double hbaseMemstoreFlushSize;
-  private TimelineMetricMetadataManager timelineMetricMetadataManager = null;
-
   private List<byte[]> precisionSplitPoints = new ArrayList<>();
   private List<byte[]> aggregateSplitPoints = new ArrayList<>();
 
@@ -71,14 +66,18 @@ public class TimelineMetricSplitPointComputer {
       slaveComponents.addAll(Arrays.asList(componentsString.split(",")));
     }
 
-    this.timelineMetricMetadataManager = timelineMetricMetadataManager;
-    hbaseTotalHeapsize = metricsConf.getDouble("hbase_total_heapsize", 1024*1024*1024);
-    hbaseMemstoreUpperLimit = hbaseConf.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.3);
-    hbaseMemstoreFlushSize = hbaseConf.getDouble("hbase.hregion.memstore.flush.size", 134217728);
+    double hbaseTotalHeapsize = metricsConf.getDouble("hbase_total_heapsize", 1024*1024*1024);
+    double hbaseMemstoreUpperLimit = hbaseConf.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5);
+    double hbaseMemstoreFlushSize = hbaseConf.getDouble("hbase.hregion.memstore.flush.size", 134217728);
+
+    computeSplitPoints(hbaseTotalHeapsize, hbaseMemstoreUpperLimit, hbaseMemstoreFlushSize, timelineMetricMetadataManager);
   }
 
 
-  protected void computeSplitPoints() {
+  private void computeSplitPoints(double hbaseTotalHeapsize,
+                                  double hbaseMemstoreUpperLimit,
+                                  double hbaseMemstoreFlushSize,
+                                  TimelineMetricMetadataManager timelineMetricMetadataManager) {
 
     double memstoreMaxMemory = hbaseMemstoreUpperLimit * hbaseTotalHeapsize;
     int maxInMemoryRegions = (int) ((memstoreMaxMemory / hbaseMemstoreFlushSize) - OTHER_TABLE_STATIC_REGIONS);
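
The restored constructor above derives a region budget from the HBase heap
and memstore settings. A back-of-the-envelope sketch with the default values
visible in this hunk (1 GB total heap, 0.5 memstore upper limit, 128 MB
flush size) and the OTHER_TABLE_STATIC_REGIONS constant from the
TimelineMetricSplitPointComputer source shown later in this mail; the full
split-point allocation logic is more involved:

    public final class RegionBudgetSketch {
      private static final int OTHER_TABLE_STATIC_REGIONS = 8;

      public static void main(String[] args) {
        double hbaseTotalHeapsize = 1024 * 1024 * 1024;  // default: 1 GB
        double hbaseMemstoreUpperLimit = 0.5;            // restored default
        double hbaseMemstoreFlushSize = 134217728;       // 128 MB

        double memstoreMaxMemory = hbaseMemstoreUpperLimit * hbaseTotalHeapsize;
        int maxInMemoryRegions =
            (int) ((memstoreMaxMemory / hbaseMemstoreFlushSize) - OTHER_TABLE_STATIC_REGIONS);

        // 0.5 * 1 GB = 512 MB; 512 MB / 128 MB = 4; 4 - 8 = -4. With the
        // defaults the budget is negative, so the computer presumably falls
        // back to its MINIMUM_*_TABLE_REGIONS constants.
        System.out.println(maxInMemoryRegions);  // prints -4
      }
    }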
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
index 86226ec..737c2ff 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
@@ -397,6 +397,18 @@ public class TimelineMetricMetadataManager {
       }
     }
 
+    if (!HOSTED_APPS_MAP.isEmpty()) {
+      Map.Entry<String, TimelineMetricHostMetadata> entry = HOSTED_APPS_MAP.entrySet().iterator().next();
+      TimelineMetricHostMetadata timelineMetricHostMetadata = entry.getValue();
+      if (timelineMetricHostMetadata.getUuid() != null  && timelineMetricHostMetadata.getUuid().length == 16) {
+        HOSTNAME_UUID_LENGTH = 16;
+        uuidGenStrategy = new MD5UuidGenStrategy();
+      } else {
+        HOSTNAME_UUID_LENGTH = 4;
+        uuidGenStrategy = new Murmur3HashUuidGenStrategy();
+      }
+    }
+
     for (String host : HOSTED_APPS_MAP.keySet()) {
       TimelineMetricHostMetadata timelineMetricHostMetadata = HOSTED_APPS_MAP.get(host);
       if (timelineMetricHostMetadata != null && timelineMetricHostMetadata.getUuid() != null) {
@@ -411,7 +423,13 @@ public class TimelineMetricMetadataManager {
    * @return the UUID generator of type org.apache.ambari.metrics.core.timeline.uuid.MetricUuidGenStrategy
    */
   private MetricUuidGenStrategy getUuidStrategy(Configuration configuration) {
-    return new Murmur3HashUuidGenStrategy();
+    String strategy = configuration.get(TIMELINE_METRICS_UUID_GEN_STRATEGY, "");
+    if ("md5".equalsIgnoreCase(strategy)){
+      return new MD5UuidGenStrategy();
+    } else {
+      //Default
+      return new Murmur3HashUuidGenStrategy();
+    }
   }
 
   /**
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java
index af8cee5..9418aa4 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java
@@ -24,12 +24,7 @@ import org.apache.commons.lang.StringUtils;
 
 public class Murmur3HashUuidGenStrategy implements MetricUuidGenStrategy{
 
-  /**
-   * Compute Murmur3Hash 16 byte UUID for a Metric-App-Instance.
-   * @param timelineClusterMetric input metric
-   * @param maxLength Max length of returned UUID. (Will always be 16 for this technique)
-   * @return 16 byte UUID.
-   */  @Override
+  @Override
   public byte[] computeUuid(TimelineClusterMetric timelineClusterMetric, int maxLength) {
 
     String metricString = timelineClusterMetric.getMetricName() + timelineClusterMetric.getAppId();
@@ -40,12 +35,6 @@ public class Murmur3HashUuidGenStrategy implements MetricUuidGenStrategy{
     return Hashing.murmur3_128().hashBytes(metricBytes).asBytes();
   }
 
-  /**
-   * Compute Murmur3Hash 4 byte UUID for a String.
-   * @param value String input
-   * @param maxLength Max length of returned UUID. (Will always be 4 for this technique)
-   * @return 4 byte UUID.
-   */
   @Override
   public byte[] computeUuid(String value, int maxLength) {
     byte[] valueBytes = value.getBytes();
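
The restored strategy above hashes the concatenated metric identity string
with Guava's 128-bit Murmur3, which always yields 16 bytes. A standalone
sketch of that call, assuming Guava on the classpath; the sample
metricName + appId value is made up:

    import com.google.common.hash.Hashing;

    public final class Murmur3Sketch {
      public static void main(String[] args) {
        // Hypothetical metricName + appId concatenation.
        byte[] metricBytes = "regionserver.Server.totalRequestCountams-hbase".getBytes();
        byte[] uuid = Hashing.murmur3_128().hashBytes(metricBytes).asBytes();
        System.out.println(uuid.length);  // always 16 for murmur3_128
      }
    }

The hunk is cut off before the String overload's body, but the 4-byte host
UUID presumably comes from a 32-bit hash applied the same way.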
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java
index 150dac2..4d663cc 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java
@@ -51,7 +51,7 @@ public class TimelineMetricSplitPointComputerTest {
     expect(metricsConfMock.getDouble("hbase_total_heapsize", 1024*1024*1024)).andReturn(1024 * 1024 * 1024.0).once();
 
     Configuration hbaseConfMock = EasyMock.createMock(Configuration.class);
-    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.3)).andReturn(0.3).once();
+    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5)).andReturn(0.5).once();
     expect(hbaseConfMock.getDouble("hbase.hregion.memstore.flush.size", 134217728)).andReturn(134217728.0).once();
 
     TimelineMetricMetadataManager metricMetadataManagerMock = EasyMock.createNiceMock(TimelineMetricMetadataManager.class);
@@ -62,7 +62,6 @@ public class TimelineMetricSplitPointComputerTest {
     TimelineMetricSplitPointComputer timelineMetricSplitPointComputer = new TimelineMetricSplitPointComputer(metricsConfMock,
       hbaseConfMock,
       metricMetadataManagerMock);
-    timelineMetricSplitPointComputer.computeSplitPoints();
 
     Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 3);
     Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 1);
@@ -86,7 +85,7 @@ public class TimelineMetricSplitPointComputerTest {
     expect(metricsConfMock.getDouble("hbase_total_heapsize", 1024*1024*1024)).andReturn(8589934592.0).once();
 
     Configuration hbaseConfMock = EasyMock.createMock(Configuration.class);
-    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.3)).andReturn(0.3).once();
+    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5)).andReturn(0.5).once();
     expect(hbaseConfMock.getDouble("hbase.hregion.memstore.flush.size", 134217728)).andReturn(134217728.0).once();
 
     TimelineMetricMetadataManager metricMetadataManagerMock = EasyMock.createNiceMock(TimelineMetricMetadataManager.class);
@@ -97,11 +96,10 @@ public class TimelineMetricSplitPointComputerTest {
     TimelineMetricSplitPointComputer timelineMetricSplitPointComputer = new TimelineMetricSplitPointComputer(metricsConfMock,
       hbaseConfMock,
       metricMetadataManagerMock);
-    timelineMetricSplitPointComputer.computeSplitPoints();
 
-    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 6);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 1);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 1);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 16);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 3);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 3);
   }
 
   @Test
@@ -121,7 +119,7 @@ public class TimelineMetricSplitPointComputerTest {
     expect(metricsConfMock.getDouble("hbase_total_heapsize", 1024*1024*1024)).andReturn(24 * 1024 * 1024 * 1024.0).once();
 
     Configuration hbaseConfMock = EasyMock.createMock(Configuration.class);
-    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.3)).andReturn(0.3).once();
+    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5)).andReturn(0.5).once();
     expect(hbaseConfMock.getDouble("hbase.hregion.memstore.flush.size", 134217728)).andReturn(2 * 134217728.0).once();
 
     TimelineMetricMetadataManager metricMetadataManagerMock = EasyMock.createNiceMock(TimelineMetricMetadataManager.class);
@@ -132,10 +130,9 @@ public class TimelineMetricSplitPointComputerTest {
     TimelineMetricSplitPointComputer timelineMetricSplitPointComputer = new TimelineMetricSplitPointComputer(metricsConfMock,
       hbaseConfMock,
       metricMetadataManagerMock);
-    timelineMetricSplitPointComputer.computeSplitPoints();
 
-    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 14);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 3);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 3);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 28);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 6);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 6);
   }
 }
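
These tests drive the split point computer purely through mocked
Configuration reads. A minimal sketch of the EasyMock pattern they use,
assuming EasyMock and hadoop-common on the classpath (the real tests also
mock TimelineMetricMetadataManager, omitted here):

    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;

    import org.apache.hadoop.conf.Configuration;

    public final class ConfMockSketch {
      public static void main(String[] args) {
        Configuration hbaseConf = createMock(Configuration.class);
        expect(hbaseConf.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5))
            .andReturn(0.5).once();
        replay(hbaseConf);

        // The code under test performs this read exactly once.
        double upperLimit =
            hbaseConf.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5);

        verify(hbaseConf);  // fails if the expected read did not happen
        System.out.println(upperLimit);
      }
    }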


[ambari] 03/03: Revert "AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work."

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 4b8a6b2e99971ac4970612fef391d6f237ae3e03
Author: Aravindan Vijayan <av...@hortonworks.com>
AuthorDate: Fri May 11 07:03:48 2018 -0700

    Revert "AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work."
    
    This reverts commit 8bb3bcab2bbb2a582ed0db25c86881a9e07803b7.
---
 .../core/timeline/HBaseTimelineMetricsService.java |  15 +-
 .../core/timeline/PhoenixHBaseAccessor.java        | 226 +++++++++++--------
 .../core/timeline/TimelineMetricConfiguration.java |  47 +++-
 .../timeline/TimelineMetricSplitPointComputer.java | 239 ---------------------
 .../core/timeline/TimelineMetricStoreWatcher.java  |   6 +-
 .../discovery/TimelineMetricMetadataManager.java   | 130 +++--------
 .../core/timeline/query/PhoenixTransactSQL.java    |  20 +-
 .../timeline/uuid/Murmur3HashUuidGenStrategy.java  |  43 ----
 .../core/timeline/uuid/TimelineMetricUuid.java     |  55 -----
 .../ambari/metrics/webapp/TimelineWebServices.java |  16 +-
 .../resources/metrics_def/AMSSMOKETESTFAKE.dat     |   1 -
 .../{HBASE_MASTER.dat => MASTER_HBASE.dat}         |   0
 .../{HBASE_REGIONSERVER.dat => SLAVE_HBASE.dat}    |   0
 .../metrics_def/TIMELINE_METRIC_STORE_WATCHER.dat  |   1 -
 .../timeline/AbstractMiniHBaseClusterTest.java     |   9 +-
 .../core/timeline/ITPhoenixHBaseAccessor.java      |  39 ++--
 .../core/timeline/PhoenixHBaseAccessorTest.java    |   4 +-
 .../TimelineMetricSplitPointComputerTest.java      | 138 ------------
 .../timeline/TimelineMetricStoreWatcherTest.java   |   8 +-
 .../timeline/discovery/TestMetadataManager.java    |   2 +-
 ...est.java => TimelineMetricUuidManagerTest.java} | 132 ++++++------
 .../ambari/server/upgrade/UpgradeCatalog270.java   |  19 +-
 .../0.1.0/configuration/ams-site.xml               |  54 +++++
 .../0.1.0/package/files/service-metrics/KAFKA.txt  |   0
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py    |  28 +--
 .../AMBARI_METRICS/0.1.0/package/scripts/params.py |   1 -
 .../AMBARI_METRICS/0.1.0/service_advisor.py        |  22 ++
 .../server/upgrade/UpgradeCatalog270Test.java      |   4 -
 .../AMBARI_METRICS/test_service_advisor.py         |   4 +
 .../2.0.6/AMBARI_METRICS/test_metrics_collector.py |   2 +-
 .../test/python/stacks/2.0.6/configs/default.json  |   4 +-
 .../stacks/2.0.6/configs/default_ams_embedded.json |   3 +-
 32 files changed, 402 insertions(+), 870 deletions(-)

diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/HBaseTimelineMetricsService.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/HBaseTimelineMetricsService.java
index d09d4bb..56a28dc 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/HBaseTimelineMetricsService.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/HBaseTimelineMetricsService.java
@@ -17,6 +17,7 @@
  */
 package org.apache.ambari.metrics.core.timeline;
 
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.DEFAULT_TOPN_HOSTS_LIMIT;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.USE_GROUPBY_AGGREGATOR_QUERIES;
 import static org.apache.ambari.metrics.core.timeline.availability.AggregationTaskRunner.ACTUAL_AGGREGATOR_NAMES;
 
@@ -34,6 +35,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ThreadFactory;
@@ -109,18 +111,15 @@ public class HBaseTimelineMetricsService extends AbstractService implements Time
   private synchronized void initializeSubsystem() {
     if (!isInitialized) {
       hBaseAccessor = new PhoenixHBaseAccessor(null);
-
-      // Initialize metadata
+      // Initialize schema
+      hBaseAccessor.initMetricSchema();
+      // Initialize metadata from store
       try {
         metricMetadataManager = new TimelineMetricMetadataManager(hBaseAccessor);
       } catch (MalformedURLException | URISyntaxException e) {
         throw new ExceptionInInitializerError("Unable to initialize metadata manager");
       }
       metricMetadataManager.initializeMetadata();
-
-      // Initialize metric schema
-      hBaseAccessor.initMetricSchema();
-
       // Initialize policies before TTL update
       hBaseAccessor.initPoliciesAndTTL();
       // Start HA service
@@ -396,10 +395,6 @@ public class HBaseTimelineMetricsService extends AbstractService implements Time
     return metricsFunctions;
   }
 
-  public void putMetricsSkipCache(TimelineMetrics metrics) throws SQLException, IOException {
-    hBaseAccessor.insertMetricRecordsWithMetadata(metricMetadataManager, metrics, true);
-  }
-
   @Override
   public TimelinePutResponse putMetrics(TimelineMetrics metrics) throws SQLException, IOException {
     // Error indicated by the Sql exception
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java
index dec7850..040df1b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java
@@ -20,7 +20,6 @@ package org.apache.ambari.metrics.core.timeline;
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.apache.ambari.metrics.core.timeline.FunctionUtils.findMetricFunctions;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.AGGREGATORS_SKIP_BLOCK_CACHE;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.BLOCKING_STORE_FILES_KEY;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_MINUTE_SLEEP_INTERVAL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_TIMESLICE_INTERVAL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.CLUSTER_DAILY_TABLE_TTL;
@@ -28,32 +27,41 @@ import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguratio
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.CLUSTER_MINUTE_TABLE_TTL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.CLUSTER_SECOND_TABLE_TTL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.CONTAINER_METRICS_TTL;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.DATE_TIERED_COMPACTION_POLICY;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.FIFO_COMPACTION_POLICY_CLASS;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.GLOBAL_MAX_RETRIES;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.GLOBAL_RESULT_LIMIT;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.GLOBAL_RETRY_INTERVAL;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HBASE_BLOCKING_STORE_FILES;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HBASE_COMPRESSION_SCHEME;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HBASE_ENCODING_SCHEME;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_MINUTE_SLEEP_INTERVAL;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HSTORE_COMPACTION_CLASS_KEY;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HSTORE_ENGINE_CLASS;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TRANSIENT_METRIC_PATTERNS;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HOST_DAILY_TABLE_TTL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HOST_HOUR_TABLE_TTL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HOST_MINUTE_TABLE_TTL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.METRICS_TRANSIENT_TABLE_TTL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.PRECISION_TABLE_TTL;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_AGGREGATE_TABLES_DURABILITY;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_AGGREGATE_TABLE_HBASE_BLOCKING_STORE_FILES;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_CACHE_COMMIT_INTERVAL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_CACHE_ENABLED;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_CACHE_SIZE;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_CLASS;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_KEY;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_CLASS;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_KEY;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_PRECISION_TABLE_DURABILITY;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_PRECISION_TABLE_HBASE_BLOCKING_STORE_FILES;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRIC_AGGREGATOR_SINK_CLASS;
+import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.ALTER_METRICS_METADATA_TABLE;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CONTAINER_METRICS_TABLE_NAME;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_CONTAINER_METRICS_TABLE_SQL;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_TRANSIENT_METRICS_TABLE_SQL;
+import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_HOSTED_APPS_METADATA_TABLE_SQL;
+import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_INSTANCE_HOST_TABLE_SQL;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_METRICS_AGGREGATE_TABLE_SQL;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_GROUPED_TABLE_SQL;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_TABLE_SQL;
+import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_METRICS_METADATA_TABLE_SQL;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_METRICS_TABLE_SQL;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.DEFAULT_ENCODING;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.DEFAULT_TABLE_COMPRESSION;
@@ -124,7 +132,6 @@ import org.apache.ambari.metrics.core.timeline.sink.ExternalSinkProvider;
 import org.apache.ambari.metrics.core.timeline.source.InternalMetricsSource;
 import org.apache.ambari.metrics.core.timeline.source.InternalSourceProvider;
 import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -194,8 +201,21 @@ public class PhoenixHBaseAccessor {
   private TimelineMetricsAggregatorSink aggregatorSink;
   private final int cacheCommitInterval;
   private final boolean skipBlockCacheForAggregatorsEnabled;
+  private final String timelineMetricsTablesDurability;
+  private final String timelineMetricsPrecisionTableDurability;
   private TimelineMetricMetadataManager metadataManagerInstance;
 
+  static final String HSTORE_COMPACTION_CLASS_KEY =
+    "hbase.hstore.defaultengine.compactionpolicy.class";
+  static final String HSTORE_ENGINE_CLASS =
+    "hbase.hstore.engine.class";
+  static final String FIFO_COMPACTION_POLICY_CLASS =
+    "org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy";
+  static final String DATE_TIERED_COMPACTION_POLICY =
+    "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine";
+  static final String BLOCKING_STORE_FILES_KEY =
+    "hbase.hstore.blockingStoreFiles";
+
   private Map<String, Integer> tableTTL = new HashMap<>();
 
   private final TimelineMetricConfiguration configuration;
@@ -239,9 +259,11 @@ public class PhoenixHBaseAccessor {
     this.cacheCommitInterval = Integer.valueOf(metricsConf.get(TIMELINE_METRICS_CACHE_COMMIT_INTERVAL, "3"));
     this.insertCache = new ArrayBlockingQueue<TimelineMetrics>(cacheSize);
     this.skipBlockCacheForAggregatorsEnabled = metricsConf.getBoolean(AGGREGATORS_SKIP_BLOCK_CACHE, false);
+    this.timelineMetricsTablesDurability = metricsConf.get(TIMELINE_METRICS_AGGREGATE_TABLES_DURABILITY, "");
+    this.timelineMetricsPrecisionTableDurability = metricsConf.get(TIMELINE_METRICS_PRECISION_TABLE_DURABILITY, "");
 
     tableTTL.put(METRICS_RECORD_TABLE_NAME, metricsConf.getInt(PRECISION_TABLE_TTL, 1 * 86400));  // 1 day
-    tableTTL.put(CONTAINER_METRICS_TABLE_NAME, metricsConf.getInt(CONTAINER_METRICS_TTL, 14 * 86400));  // 30 days
+    tableTTL.put(CONTAINER_METRICS_TABLE_NAME, metricsConf.getInt(CONTAINER_METRICS_TTL, 30 * 86400));  // 30 days
     tableTTL.put(METRICS_AGGREGATE_MINUTE_TABLE_NAME, metricsConf.getInt(HOST_MINUTE_TABLE_TTL, 7 * 86400)); //7 days
     tableTTL.put(METRICS_AGGREGATE_HOURLY_TABLE_NAME, metricsConf.getInt(HOST_HOUR_TABLE_TTL, 30 * 86400)); //30 days
     tableTTL.put(METRICS_AGGREGATE_DAILY_TABLE_NAME, metricsConf.getInt(HOST_DAILY_TABLE_TTL, 365 * 86400)); //1 year
@@ -448,7 +470,7 @@ public class PhoenixHBaseAccessor {
     return mapper.readValue(json, metricValuesTypeRef);
   }
 
-  public Connection getConnectionRetryingOnException() throws SQLException, InterruptedException {
+  private Connection getConnectionRetryingOnException() throws SQLException, InterruptedException {
     RetryCounter retryCounter = retryCounterFactory.create();
     while (true) {
       try{
@@ -489,9 +511,6 @@ public class PhoenixHBaseAccessor {
   protected void initMetricSchema() {
     Connection conn = null;
     Statement stmt = null;
-    PreparedStatement pStmt = null;
-    TimelineMetricSplitPointComputer splitPointComputer = new TimelineMetricSplitPointComputer(
-      metricsConf, hbaseConf, metadataManagerInstance);
 
     String encoding = metricsConf.get(HBASE_ENCODING_SCHEME, DEFAULT_ENCODING);
     String compression = metricsConf.get(HBASE_COMPRESSION_SCHEME, DEFAULT_TABLE_COMPRESSION);
@@ -502,6 +521,21 @@ public class PhoenixHBaseAccessor {
       conn = getConnectionRetryingOnException();
       stmt = conn.createStatement();
 
+      // Metadata
+      String metadataSql = String.format(CREATE_METRICS_METADATA_TABLE_SQL,
+        encoding, compression);
+      stmt.executeUpdate(metadataSql);
+      stmt.executeUpdate(ALTER_METRICS_METADATA_TABLE);
+
+      String hostedAppSql = String.format(CREATE_HOSTED_APPS_METADATA_TABLE_SQL,
+        encoding, compression);
+      stmt.executeUpdate(hostedAppSql);
+
+      //Host Instances table
+      String hostedInstancesSql = String.format(CREATE_INSTANCE_HOST_TABLE_SQL,
+        encoding, compression);
+      stmt.executeUpdate(hostedInstancesSql);
+
       // Container Metrics
       stmt.executeUpdate( String.format(CREATE_CONTAINER_METRICS_TABLE_SQL,
         encoding, tableTTL.get(CONTAINER_METRICS_TABLE_NAME), compression));
@@ -509,15 +543,13 @@ public class PhoenixHBaseAccessor {
       // Host level
       String precisionSql = String.format(CREATE_METRICS_TABLE_SQL,
         encoding, tableTTL.get(METRICS_RECORD_TABLE_NAME), compression);
-      pStmt = prepareCreateMetricsTableStatement(conn, precisionSql, splitPointComputer.getPrecisionSplitPoints());
-      pStmt.executeUpdate();
+      stmt.executeUpdate(precisionSql);
 
       String hostMinuteAggregrateSql = String.format(CREATE_METRICS_AGGREGATE_TABLE_SQL,
         METRICS_AGGREGATE_MINUTE_TABLE_NAME, encoding,
         tableTTL.get(METRICS_AGGREGATE_MINUTE_TABLE_NAME),
         compression);
-      pStmt = prepareCreateMetricsTableStatement(conn, hostMinuteAggregrateSql, splitPointComputer.getHostAggregateSplitPoints());
-      pStmt.executeUpdate();
+      stmt.executeUpdate(hostMinuteAggregrateSql);
 
       stmt.executeUpdate(String.format(CREATE_METRICS_AGGREGATE_TABLE_SQL,
         METRICS_AGGREGATE_HOURLY_TABLE_NAME, encoding,
@@ -533,9 +565,8 @@ public class PhoenixHBaseAccessor {
         METRICS_CLUSTER_AGGREGATE_TABLE_NAME, encoding,
         tableTTL.get(METRICS_CLUSTER_AGGREGATE_TABLE_NAME),
         compression);
-      pStmt = prepareCreateMetricsTableStatement(conn, aggregateSql, splitPointComputer.getClusterAggregateSplitPoints());
-      pStmt.executeUpdate();
 
+      stmt.executeUpdate(aggregateSql);
       stmt.executeUpdate(String.format(CREATE_METRICS_CLUSTER_AGGREGATE_GROUPED_TABLE_SQL,
         METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME, encoding,
         tableTTL.get(METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME),
@@ -572,13 +603,6 @@ public class PhoenixHBaseAccessor {
           // Ignore
         }
       }
-      if (pStmt != null) {
-        try {
-          pStmt.close();
-        } catch (Exception e) {
-          // Ignore
-        }
-      }
       if (conn != null) {
         try {
           conn.close();
@@ -589,7 +613,7 @@ public class PhoenixHBaseAccessor {
     }
   }
 
-  void initPoliciesAndTTL() {
+  protected void initPoliciesAndTTL() {
     Admin hBaseAdmin = null;
     try {
       hBaseAdmin = dataSource.getHBaseAdmin();
@@ -598,13 +622,9 @@ public class PhoenixHBaseAccessor {
     }
 
     TableName[] tableNames = null;
-    TableName[] containerMetricsTableName = null;
-
     if (hBaseAdmin != null) {
       try {
         tableNames = hBaseAdmin.listTableNames(PHOENIX_TABLES_REGEX_PATTERN, false);
-        containerMetricsTableName = hBaseAdmin.listTableNames(CONTAINER_METRICS_TABLE_NAME, false);
-        tableNames = (TableName[]) ArrayUtils.addAll(tableNames, containerMetricsTableName);
       } catch (IOException e) {
         LOG.warn("Unable to get table names from HBaseAdmin for setting policies.", e);
         return;
@@ -688,44 +708,72 @@ public class PhoenixHBaseAccessor {
   }
 
   private boolean setDurabilityForTable(String tableName, TableDescriptorBuilder tableDescriptor) {
-    String tableDurability = metricsConf.get("timeline.metrics." + tableName + ".durability", "");
-    if (StringUtils.isNotEmpty(tableDurability)) {
-      LOG.info("Setting WAL option " + tableDurability + " for table : " + tableName);
-      boolean validDurability = true;
-      if ("SKIP_WAL".equals(tableDurability)) {
-        tableDescriptor.setDurability(Durability.SKIP_WAL);
-      } else if ("SYNC_WAL".equals(tableDurability)) {
-        tableDescriptor.setDurability(Durability.SYNC_WAL);
-      } else if ("ASYNC_WAL".equals(tableDurability)) {
-        tableDescriptor.setDurability(Durability.ASYNC_WAL);
-      } else if ("FSYNC_WAL".equals(tableDurability)) {
-        tableDescriptor.setDurability(Durability.FSYNC_WAL);
-      } else {
-        LOG.info("Unknown value for durability : " + tableDurability);
-        validDurability = false;
+
+    boolean modifyTable = false;
+
+    if (METRIC_TRANSIENT_TABLE_NAME.equalsIgnoreCase(tableName)) {
+      tableDescriptor.setDurability(Durability.SKIP_WAL);
+      modifyTable = true;
+    } else if (METRICS_RECORD_TABLE_NAME.equals(tableName)) {
+      if (!timelineMetricsPrecisionTableDurability.isEmpty()) {
+        LOG.info("Setting WAL option " + timelineMetricsPrecisionTableDurability + " for table : " + tableName);
+        boolean validDurability = true;
+        if ("SKIP_WAL".equals(timelineMetricsPrecisionTableDurability)) {
+          tableDescriptor.setDurability(Durability.SKIP_WAL);
+        } else if ("SYNC_WAL".equals(timelineMetricsPrecisionTableDurability)) {
+          tableDescriptor.setDurability(Durability.SYNC_WAL);
+        } else if ("ASYNC_WAL".equals(timelineMetricsPrecisionTableDurability)) {
+          tableDescriptor.setDurability(Durability.ASYNC_WAL);
+        } else if ("FSYNC_WAL".equals(timelineMetricsPrecisionTableDurability)) {
+          tableDescriptor.setDurability(Durability.FSYNC_WAL);
+        } else {
+          LOG.info("Unknown value for " + TIMELINE_METRICS_PRECISION_TABLE_DURABILITY + " : " + timelineMetricsPrecisionTableDurability);
+          validDurability = false;
+        }
+        if (validDurability) {
+          modifyTable = true;
+        }
+      }
+    } else {
+      if (!timelineMetricsTablesDurability.isEmpty()) {
+        LOG.info("Setting WAL option " + timelineMetricsTablesDurability + " for table : " + tableName);
+        boolean validDurability = true;
+        if ("SKIP_WAL".equals(timelineMetricsTablesDurability)) {
+          tableDescriptor.setDurability(Durability.SKIP_WAL);
+        } else if ("SYNC_WAL".equals(timelineMetricsTablesDurability)) {
+          tableDescriptor.setDurability(Durability.SYNC_WAL);
+        } else if ("ASYNC_WAL".equals(timelineMetricsTablesDurability)) {
+          tableDescriptor.setDurability(Durability.ASYNC_WAL);
+        } else if ("FSYNC_WAL".equals(timelineMetricsTablesDurability)) {
+          tableDescriptor.setDurability(Durability.FSYNC_WAL);
+        } else {
+          LOG.info("Unknown value for " + TIMELINE_METRICS_AGGREGATE_TABLES_DURABILITY + " : " + timelineMetricsTablesDurability);
+          validDurability = false;
+        }
+        if (validDurability) {
+          modifyTable = true;
+        }
       }
-      return validDurability;
     }
-    return false;
+    return modifyTable;
   }
 
-
   private boolean setCompactionPolicyForTable(String tableName, TableDescriptorBuilder tableDescriptorBuilder) {
 
     boolean modifyTable = false;
 
-    String keyConfig = "timeline.metrics." + tableName + ".compaction.policy.key";
-    String policyConfig = "timeline.metrics." + tableName + ".compaction.policy";
-    String storeFilesConfig = "timeline.metrics." + tableName + ".blocking.store.files";
-
-    String compactionPolicyKey = metricsConf.get(keyConfig, HSTORE_ENGINE_CLASS);
-    String compactionPolicyClass = metricsConf.get(policyConfig, DATE_TIERED_COMPACTION_POLICY);
-    int blockingStoreFiles = hbaseConf.getInt(storeFilesConfig, 60);
-
-    if (tableName.equals(METRICS_RECORD_TABLE_NAME)) {
-      compactionPolicyKey = metricsConf.get(keyConfig, HSTORE_COMPACTION_CLASS_KEY);
-      compactionPolicyClass = metricsConf.get(policyConfig, FIFO_COMPACTION_POLICY_CLASS);
-      blockingStoreFiles = hbaseConf.getInt(storeFilesConfig, 1000);
+    String compactionPolicyKey = metricsConf.get(TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_KEY,
+      HSTORE_ENGINE_CLASS);
+    String compactionPolicyClass = metricsConf.get(TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_CLASS,
+      DATE_TIERED_COMPACTION_POLICY);
+    int blockingStoreFiles = hbaseConf.getInt(TIMELINE_METRICS_AGGREGATE_TABLE_HBASE_BLOCKING_STORE_FILES, 60);
+
+    if (tableName.equals(METRICS_RECORD_TABLE_NAME) || tableName.equalsIgnoreCase(METRIC_TRANSIENT_TABLE_NAME)) {
+      compactionPolicyKey = metricsConf.get(TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_KEY,
+        HSTORE_COMPACTION_CLASS_KEY);
+      compactionPolicyClass = metricsConf.get(TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_CLASS,
+        FIFO_COMPACTION_POLICY_CLASS);
+      blockingStoreFiles = hbaseConf.getInt(TIMELINE_METRICS_PRECISION_TABLE_HBASE_BLOCKING_STORE_FILES, 1000);
     }
 
     if (StringUtils.isEmpty(compactionPolicyKey) || StringUtils.isEmpty(compactionPolicyClass)) {
@@ -733,54 +781,46 @@ public class PhoenixHBaseAccessor {
       modifyTable = setHbaseBlockingStoreFiles(tableDescriptorBuilder, tableName, 300);
     } else {
       tableDescriptorBuilder.setValue(compactionPolicyKey, compactionPolicyClass);
-      setHbaseBlockingStoreFiles(tableDescriptorBuilder, tableName, blockingStoreFiles);
-      modifyTable = true;
-    }
-
-    if (!compactionPolicyKey.equals(HSTORE_ENGINE_CLASS)) {
       tableDescriptorBuilder.removeValue(HSTORE_ENGINE_CLASS.getBytes());
-    }
-    if (!compactionPolicyKey.equals(HSTORE_COMPACTION_CLASS_KEY)) {
       tableDescriptorBuilder.removeValue(HSTORE_COMPACTION_CLASS_KEY.getBytes());
+      setHbaseBlockingStoreFiles(tableDescriptorBuilder, tableName, blockingStoreFiles);
+      modifyTable = true;
     }
 
     return modifyTable;
   }
 
   private boolean setHbaseBlockingStoreFiles(TableDescriptorBuilder tableDescriptor, String tableName, int value) {
-    tableDescriptor.setValue(BLOCKING_STORE_FILES_KEY, String.valueOf(value));
-    LOG.info("Setting config property " + BLOCKING_STORE_FILES_KEY +
-      " = " + value + " for " + tableName);
-    return true;
-  }
-
-
-  private PreparedStatement prepareCreateMetricsTableStatement(Connection connection,
-                                                               String sql,
-                                                               List<byte[]> splitPoints) throws SQLException {
-
-    String createTableWithSplitPointsSql = sql + getSplitPointsStr(splitPoints.size());
-    LOG.info(createTableWithSplitPointsSql);
-    PreparedStatement statement = connection.prepareStatement(createTableWithSplitPointsSql);
-    for (int i = 1; i <= splitPoints.size(); i++) {
-      statement.setBytes(i, splitPoints.get(i - 1));
+    int blockingStoreFiles = hbaseConf.getInt(HBASE_BLOCKING_STORE_FILES, value);
+    if (blockingStoreFiles != value) {
+      blockingStoreFiles = value;
+      tableDescriptor.setValue(BLOCKING_STORE_FILES_KEY, String.valueOf(value));
+      LOG.info("Setting config property " + BLOCKING_STORE_FILES_KEY +
+        " = " + blockingStoreFiles + " for " + tableName);
+      return true;
     }
-    return statement;
+    return false;
   }
 
-  private String getSplitPointsStr(int numSplits) {
-    if (numSplits <= 0) {
+  protected String getSplitPointsStr(String splitPoints) {
+    if (StringUtils.isEmpty(splitPoints.trim())) {
       return "";
     }
-    StringBuilder sb = new StringBuilder(" SPLIT ON ");
-    sb.append("(");
-    for (int i = 0; i < numSplits; i++) {
-      sb.append("?");
-      sb.append(",");
+    String[] points = splitPoints.split(",");
+    if (points.length > 0) {
+      StringBuilder sb = new StringBuilder(" SPLIT ON ");
+      sb.append("(");
+      for (String point : points) {
+        sb.append("'");
+        sb.append(point.trim());
+        sb.append("'");
+        sb.append(",");
+      }
+      sb.deleteCharAt(sb.length() - 1);
+      sb.append(")");
+      return sb.toString();
     }
-    sb.deleteCharAt(sb.length() - 1);
-    sb.append(")");
-    return sb.toString();
+    return "";
   }
 
   /**
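
The restored getSplitPointsStr above turns a comma-separated configuration
value (e.g. timeline.metrics.host.aggregate.splitpoints, re-added in the
TimelineMetricConfiguration hunk below) into a Phoenix SPLIT ON clause.
A usage sketch of the same string-building, with illustrative split points:

    public final class SplitPointsSketch {
      // Same shape as the restored method: quote each trimmed point,
      // drop the trailing comma, wrap in SPLIT ON (...).
      static String splitOnClause(String splitPoints) {
        if (splitPoints == null || splitPoints.trim().isEmpty()) {
          return "";
        }
        StringBuilder sb = new StringBuilder(" SPLIT ON (");
        for (String point : splitPoints.split(",")) {
          sb.append("'").append(point.trim()).append("',");
        }
        sb.deleteCharAt(sb.length() - 1);
        return sb.append(")").toString();
      }

      public static void main(String[] args) {
        System.out.println(splitOnClause("cpu_user, mem_free"));
        // ->  SPLIT ON ('cpu_user','mem_free')
      }
    }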
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricConfiguration.java
index 393d4a3..6ec2c6b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricConfiguration.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricConfiguration.java
@@ -237,6 +237,12 @@ public class TimelineMetricConfiguration {
   public static final String WATCHER_MAX_FAILURES =
     "timeline.metrics.service.watcher.max.failures";
 
+  public static final String PRECISION_TABLE_SPLIT_POINTS =
+    "timeline.metrics.host.aggregate.splitpoints";
+
+  public static final String AGGREGATE_TABLE_SPLIT_POINTS =
+    "timeline.metrics.cluster.aggregate.splitpoints";
+
   public static final String AGGREGATORS_SKIP_BLOCK_CACHE =
     "timeline.metrics.aggregators.skip.blockcache.enabled";
 
@@ -255,6 +261,12 @@ public class TimelineMetricConfiguration {
   public static final String TIMELINE_METRICS_SINK_COLLECTION_PERIOD =
     "timeline.metrics.sink.collection.period";
 
+  public static final String TIMELINE_METRICS_PRECISION_TABLE_DURABILITY =
+    "timeline.metrics.precision.table.durability";
+
+  public static final String TIMELINE_METRICS_AGGREGATE_TABLES_DURABILITY =
+      "timeline.metrics.aggregate.tables.durability";
+
   public static final String TIMELINE_METRICS_WHITELIST_ENABLED =
     "timeline.metrics.whitelisting.enabled";
 
@@ -273,9 +285,33 @@ public class TimelineMetricConfiguration {
   public static final String TIMELINE_METRICS_APPS_WHITELIST =
     "timeline.metrics.apps.whitelist";
 
+  public static final String HBASE_BLOCKING_STORE_FILES =
+    "hbase.hstore.blockingStoreFiles";
+
+  public static final String DEFAULT_TOPN_HOSTS_LIMIT =
+    "timeline.metrics.default.topn.hosts.limit";
+
   public static final String TIMELINE_METRIC_AGGREGATION_SQL_FILTERS =
     "timeline.metrics.cluster.aggregation.sql.filters";
 
+  public static final String TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_KEY =
+    "timeline.metrics.hbase.aggregate.table.compaction.policy.key";
+
+  public static final String TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_CLASS =
+    "timeline.metrics.hbase.aggregate.table.compaction.policy.class";
+
+  public static final String TIMELINE_METRICS_AGGREGATE_TABLE_HBASE_BLOCKING_STORE_FILES =
+    "timeline.metrics.aggregate.table.hbase.hstore.blockingStoreFiles";
+
+  public static final String TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_KEY =
+    "timeline.metrics.hbase.precision.table.compaction.policy.key";
+
+  public static final String TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_CLASS =
+    "timeline.metrics.hbase.precision.table.compaction.policy.class";
+
+  public static final String TIMELINE_METRICS_PRECISION_TABLE_HBASE_BLOCKING_STORE_FILES =
+    "timeline.metrics.precision.table.hbase.hstore.blockingStoreFiles";
+
   public static final String TIMELINE_METRICS_SUPPORT_MULTIPLE_CLUSTERS =
     "timeline.metrics.support.multiple.clusters";
 
@@ -310,9 +346,6 @@ public class TimelineMetricConfiguration {
 
   public static final String TRANSIENT_METRIC_PATTERNS = "timeline.metrics.transient.metric.patterns";
 
-  public static final String TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS = "timeline.metrics.initial.configured.master.components";
-  public static final String TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS = "timeline.metrics.initial.configured.slave.components";
-
   public static final String KAFKA_SERVERS = "timeline.metrics.external.sink.kafka.bootstrap.servers";
   public static final String KAFKA_ACKS = "timeline.metrics.external.sink.kafka.acks";
   public static final String KAFKA_RETRIES = "timeline.metrics.external.sink.kafka.bootstrap.retries";
@@ -320,13 +353,7 @@ public class TimelineMetricConfiguration {
   public static final String KAFKA_LINGER_MS = "timeline.metrics.external.sink.kafka.linger.ms";
   public static final String KAFKA_BUFFER_MEM = "timeline.metrics.external.sink.kafka.buffer.memory";
   public static final String KAFKA_SINK_TIMEOUT_SECONDS = "timeline.metrics.external.sink.kafka.timeout.seconds";
-
-  public static final String HSTORE_COMPACTION_CLASS_KEY = "hbase.hstore.defaultengine.compactionpolicy.class";
-  public static final String HSTORE_ENGINE_CLASS = "hbase.hstore.engine.class";
-  public static final String FIFO_COMPACTION_POLICY_CLASS = "org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy";
-  public static final String DATE_TIERED_COMPACTION_POLICY = "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine";
-  public static final String BLOCKING_STORE_FILES_KEY = "hbase.hstore.blockingStoreFiles";
-
+  
   private Configuration hbaseConf;
   private Configuration metricsConf;
   private Configuration metricsSslConf;
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java
deleted file mode 100644
index 89bb843..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.metrics.core.timeline;
-
-import org.apache.ambari.metrics.core.timeline.aggregators.TimelineClusterMetric;
-import org.apache.ambari.metrics.core.timeline.discovery.TimelineMetricMetadataManager;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-
-import java.io.BufferedReader;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS;
-
-public class TimelineMetricSplitPointComputer {
-
-  private static final Log LOG = LogFactory.getLog(TimelineMetricSplitPointComputer.class);
-  private Set<String> masterComponents = new HashSet<>();
-  private Set<String> slaveComponents = new HashSet<>();
-
-  private static final int MINIMUM_PRECISION_TABLE_REGIONS = 4;
-  private static final int MINIMUM_AGGREGATE_TABLE_REGIONS = 2;
-  private static final int OTHER_TABLE_STATIC_REGIONS = 8;
-  private static final int SLAVE_EQUIDISTANT_POINTS = 50;
-  private static final int MASTER_EQUIDISTANT_POINTS = 5;
-
-  private List<byte[]> precisionSplitPoints = new ArrayList<>();
-  private List<byte[]> aggregateSplitPoints = new ArrayList<>();
-
-  public TimelineMetricSplitPointComputer(Configuration metricsConf,
-                                          Configuration hbaseConf,
-                                          TimelineMetricMetadataManager timelineMetricMetadataManager) {
-
-    String componentsString = metricsConf.get(TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS, "");
-    if (StringUtils.isNotEmpty(componentsString)) {
-      masterComponents.addAll(Arrays.asList(componentsString.split(",")));
-    }
-
-    componentsString = metricsConf.get(TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS, "");
-    if (StringUtils.isNotEmpty(componentsString)) {
-      slaveComponents.addAll(Arrays.asList(componentsString.split(",")));
-    }
-
-    double hbaseTotalHeapsize = metricsConf.getDouble("hbase_total_heapsize", 1024*1024*1024);
-    double hbaseMemstoreUpperLimit = hbaseConf.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5);
-    double hbaseMemstoreFlushSize = hbaseConf.getDouble("hbase.hregion.memstore.flush.size", 134217728);
-
-    computeSplitPoints(hbaseTotalHeapsize, hbaseMemstoreUpperLimit, hbaseMemstoreFlushSize, timelineMetricMetadataManager);
-  }
-
-
-  private void computeSplitPoints(double hbaseTotalHeapsize,
-                                  double hbaseMemstoreUpperLimit,
-                                  double hbaseMemstoreFlushSize,
-                                  TimelineMetricMetadataManager timelineMetricMetadataManager) {
-
-    double memstoreMaxMemory = hbaseMemstoreUpperLimit * hbaseTotalHeapsize;
-    int maxInMemoryRegions = (int) ((memstoreMaxMemory / hbaseMemstoreFlushSize) - OTHER_TABLE_STATIC_REGIONS);
-
-    int targetPrecisionTableRegionCount = MINIMUM_PRECISION_TABLE_REGIONS;
-    int targetAggregateTableRegionCount = MINIMUM_AGGREGATE_TABLE_REGIONS;
-
-    if (maxInMemoryRegions > 2) {
-      targetPrecisionTableRegionCount =  Math.max(4, (int)(0.70 * maxInMemoryRegions));
-      targetAggregateTableRegionCount =  Math.max(2, (int)(0.15 * maxInMemoryRegions));
-    }
-
-    List<MetricApp> metricList = new ArrayList<>();
-
-    for (String component : masterComponents) {
-      metricList.addAll(getSortedMetricListForSplitPoint(component, false));
-    }
-
-    for (String component : slaveComponents) {
-      metricList.addAll(getSortedMetricListForSplitPoint(component, true));
-    }
-
-    int totalMetricLength = metricList.size();
-
-    if (targetPrecisionTableRegionCount > 1) {
-      int idx = (int) Math.ceil(totalMetricLength / targetPrecisionTableRegionCount);
-      int index = idx;
-      for (int i = 0; i < targetPrecisionTableRegionCount; i++) {
-        if (index < totalMetricLength - 1) {
-          MetricApp metricAppService = metricList.get(index);
-          byte[] uuid = timelineMetricMetadataManager.getUuid(
-            new TimelineClusterMetric(metricAppService.metricName, metricAppService.appId, null, -1),
-            true);
-          precisionSplitPoints.add(uuid);
-          index += idx;
-        }
-      }
-    }
-
-    if (targetAggregateTableRegionCount > 1) {
-      int idx = (int) Math.ceil(totalMetricLength / targetAggregateTableRegionCount);
-      int index = idx;
-      for (int i = 0; i < targetAggregateTableRegionCount; i++) {
-        if (index < totalMetricLength - 1) {
-          MetricApp metricAppService = metricList.get(index);
-          byte[] uuid = timelineMetricMetadataManager.getUuid(
-            new TimelineClusterMetric(metricAppService.metricName, metricAppService.appId, null, -1),
-            true);
-          aggregateSplitPoints.add(uuid);
-          index += idx;
-        }
-      }
-    }
-  }
-
-  private List<MetricApp> getSortedMetricListForSplitPoint(String component, boolean isSlave) {
-
-    String appId = getAppId(component);
-    List<MetricApp> metricList = new ArrayList<>();
-
-    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-    if (classLoader == null) {
-      classLoader = getClass().getClassLoader();
-    }
-
-    String strLine;
-    BufferedReader bufferedReader;
-
-    try (InputStream inputStream = classLoader.getResourceAsStream("metrics_def/" + appId.toUpperCase() + ".dat")) {
-
-      if (inputStream != null) {
-        bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
-        LOG.info("Found split point candidate metrics for : " + appId);
-
-        while ((strLine = bufferedReader.readLine()) != null) {
-          metricList.add(new MetricApp(strLine.trim(), appId));
-        }
-      } else {
-        LOG.info("Split point candidate metrics not found for : " + appId);
-      }
-    } catch (Exception e) {
-      LOG.info("Error reading split point candidate metrics for component : " + component);
-      LOG.error(e);
-    }
-
-    if (isSlave) {
-      return getEquidistantMetrics(metricList, SLAVE_EQUIDISTANT_POINTS);
-    } else {
-      return getEquidistantMetrics(metricList, MASTER_EQUIDISTANT_POINTS);
-    }
-  }
-
-  private List<MetricApp> getEquidistantMetrics(List<MetricApp> metrics, int distance) {
-    List<MetricApp> selectedMetricApps = new ArrayList<>();
-
-    int idx = metrics.size() / distance;
-    if (idx == 0) {
-      return metrics;
-    }
-
-    int index = idx;
-    for (int i = 0; i < distance; i++) {
-      selectedMetricApps.add(metrics.get(index - 1));
-      index += idx;
-    }
-    return selectedMetricApps;
-  }
-
-
-  public List<byte[]> getPrecisionSplitPoints() {
-    return precisionSplitPoints;
-  }
-
-  public List<byte[]> getClusterAggregateSplitPoints() {
-    return aggregateSplitPoints;
-  }
-
-  public List<byte[]> getHostAggregateSplitPoints() {
-    return aggregateSplitPoints;
-  }
-
-  private String getAppId(String component) {
-
-    if (component.equalsIgnoreCase("METRICS_COLLECTOR")) {
-      return "ams-hbase";
-    }
-
-    if (component.equalsIgnoreCase("METRICS_MONITOR")) {
-      return "HOST";
-    }
-    return component;
-  }
-}
-
-class MetricApp implements Comparable {
-  String metricName;
-  String appId;
-
-  MetricApp(String metricName, String appId) {
-    this.metricName = metricName;
-    if (appId.startsWith("hbase")) {
-      this.appId = "hbase";
-    } else {
-      this.appId = appId;
-    }
-  }
-
-  @Override
-  public int compareTo(Object o) {
-    MetricApp that = (MetricApp)o;
-
-    int metricCompare = metricName.compareTo(that.metricName);
-    if (metricCompare != 0) {
-      return metricCompare;
-    }
-
-    return appId.compareTo(that.appId);
-  }
-}
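
The deleted computer sizes region counts from the memstore budget before picking equidistant metrics as split keys. A worked example of that arithmetic, using the 8 GB "medium cluster" inputs from the test deleted later in this patch:

    // Worked example of the computeSplitPoints() sizing above; the inputs
    // mirror the deleted TimelineMetricSplitPointComputerTest medium case.
    public class SplitPointMathSketch {
      public static void main(String[] args) {
        double hbaseTotalHeapsize = 8L * 1024 * 1024 * 1024; // hbase_total_heapsize
        double memstoreUpperLimit = 0.5;      // hbase.regionserver.global.memstore.upperLimit
        double memstoreFlushSize = 134217728; // hbase.hregion.memstore.flush.size (128 MB)

        double memstoreMaxMemory = memstoreUpperLimit * hbaseTotalHeapsize;           // 4 GB
        int maxInMemoryRegions = (int) ((memstoreMaxMemory / memstoreFlushSize) - 8); // 32 - 8 = 24

        int precisionRegions = Math.max(4, (int) (0.70 * maxInMemoryRegions)); // 16
        int aggregateRegions = Math.max(2, (int) (0.15 * maxInMemoryRegions)); // 3
        System.out.println(precisionRegions + " precision / " + aggregateRegions + " aggregate");
      }
    }
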
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcher.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcher.java
index ba7ce44..0ab7929 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcher.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcher.java
@@ -45,13 +45,13 @@ public class TimelineMetricStoreWatcher implements Runnable {
   private static int failures = 0;
   private final TimelineMetricConfiguration configuration;
 
-  private HBaseTimelineMetricsService timelineMetricStore;
+  private TimelineMetricStore timelineMetricStore;
 
   //used to call timelineMetricStore blocking methods with timeout
   private ExecutorService executor = Executors.newSingleThreadExecutor();
 
 
-  public TimelineMetricStoreWatcher(HBaseTimelineMetricsService timelineMetricStore,
+  public TimelineMetricStoreWatcher(TimelineMetricStore timelineMetricStore,
                                     TimelineMetricConfiguration configuration) {
     this.timelineMetricStore = timelineMetricStore;
     this.configuration = configuration;
@@ -100,7 +100,7 @@ public class TimelineMetricStoreWatcher implements Runnable {
 
     Callable<TimelineMetric> task = new Callable<TimelineMetric>() {
       public TimelineMetric call() throws Exception {
-        timelineMetricStore.putMetricsSkipCache(metrics);
+        timelineMetricStore.putMetrics(metrics);
         TimelineMetrics timelineMetrics = timelineMetricStore.getTimelineMetrics(
           Collections.singletonList(FAKE_METRIC_NAME), Collections.singletonList(FAKE_HOSTNAME),
           FAKE_APP_ID, null, startTime - delay * 2 * 1000,
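
The watcher change swaps the concrete HBaseTimelineMetricsService for the TimelineMetricStore interface and routes the health probe through putMetrics. The surrounding pattern is a put/read round trip submitted to a single-thread executor with a deadline; a self-contained sketch, where the 30-second timeout and the string payload are stand-ins:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class WatcherTimeoutSketch {
      public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<String> future = executor.submit(() -> {
          // stand-in for putMetrics(...) followed by getTimelineMetrics(...)
          return "fake-metric-read-back";
        });
        try {
          System.out.println("store healthy: " + future.get(30, TimeUnit.SECONDS));
        } catch (TimeoutException e) {
          future.cancel(true); // counts as one watcher failure
        } finally {
          executor.shutdown();
        }
      }
    }
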
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
index 737c2ff..1ca5bc0 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
@@ -20,9 +20,7 @@ package org.apache.ambari.metrics.core.timeline.discovery;
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URISyntaxException;
-import java.sql.Connection;
 import java.sql.SQLException;
-import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -40,12 +38,9 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.ambari.metrics.core.timeline.MetricsSystemInitializationException;
 import org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration;
 import org.apache.ambari.metrics.core.timeline.uuid.MetricUuidGenStrategy;
 import org.apache.ambari.metrics.core.timeline.uuid.MD5UuidGenStrategy;
-import org.apache.ambari.metrics.core.timeline.uuid.Murmur3HashUuidGenStrategy;
-import org.apache.ambari.metrics.core.timeline.uuid.TimelineMetricUuid;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang.ArrayUtils;
@@ -58,19 +53,13 @@ import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
 import org.apache.ambari.metrics.core.timeline.PhoenixHBaseAccessor;
 import org.apache.ambari.metrics.core.timeline.aggregators.TimelineClusterMetric;
+import org.apache.ambari.metrics.core.timeline.uuid.HashBasedUuidGenStrategy;
 
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HBASE_COMPRESSION_SCHEME;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HBASE_ENCODING_SCHEME;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TRANSIENT_METRIC_PATTERNS;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.METRICS_METADATA_SYNC_INIT_DELAY;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.METRICS_METADATA_SYNC_SCHEDULE_DELAY;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_UUID_GEN_STRATEGY;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRIC_METADATA_FILTERS;
-import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_HOSTED_APPS_METADATA_TABLE_SQL;
-import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_INSTANCE_HOST_TABLE_SQL;
-import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_METRICS_METADATA_TABLE_SQL;
-import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.DEFAULT_ENCODING;
-import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.DEFAULT_TABLE_COMPRESSION;
 import static org.apache.hadoop.metrics2.sink.timeline.TimelineMetricUtils.getJavaMetricPatterns;
 import static org.apache.hadoop.metrics2.sink.timeline.TimelineMetricUtils.getJavaRegexFromSqlRegex;
 
@@ -78,18 +67,18 @@ public class TimelineMetricMetadataManager {
   private static final Log LOG = LogFactory.getLog(TimelineMetricMetadataManager.class);
   // Cache all metadata on retrieval
   private final Map<TimelineMetricMetadataKey, TimelineMetricMetadata> METADATA_CACHE = new ConcurrentHashMap<>();
-  private final Map<TimelineMetricUuid, TimelineMetricMetadataKey> uuidKeyMap = new ConcurrentHashMap<>();
+  private final Map<String, TimelineMetricMetadataKey> uuidKeyMap = new ConcurrentHashMap<>();
   // Map to lookup apps on a host
   private final Map<String, TimelineMetricHostMetadata> HOSTED_APPS_MAP = new ConcurrentHashMap<>();
-  private final Map<TimelineMetricUuid, String> uuidHostMap = new ConcurrentHashMap<>();
+  private final Map<String, String> uuidHostMap = new ConcurrentHashMap<>();
   private final Map<String, Set<String>> INSTANCE_HOST_MAP = new ConcurrentHashMap<>();
   // Sync only when needed
   AtomicBoolean SYNC_HOSTED_APPS_METADATA = new AtomicBoolean(false);
   AtomicBoolean SYNC_HOSTED_INSTANCES_METADATA = new AtomicBoolean(false);
 
-  private MetricUuidGenStrategy uuidGenStrategy = new Murmur3HashUuidGenStrategy();
+  private MetricUuidGenStrategy uuidGenStrategy = new HashBasedUuidGenStrategy();
   public static final int TIMELINE_METRIC_UUID_LENGTH = 16;
-  public static int HOSTNAME_UUID_LENGTH = 4;
+  public static final int HOSTNAME_UUID_LENGTH = 16;
 
   //Transient metric patterns. No UUID management and aggregation for such metrics.
   private List<String> transientMetricPatterns = new ArrayList<>();
@@ -131,54 +120,7 @@ public class TimelineMetricMetadataManager {
    * Initialize Metadata from the store
    */
   public void initializeMetadata() {
-
-    //Create metadata schema
-    Connection conn = null;
-    Statement stmt = null;
-
-    String encoding = metricsConf.get(HBASE_ENCODING_SCHEME, DEFAULT_ENCODING);
-    String compression = metricsConf.get(HBASE_COMPRESSION_SCHEME, DEFAULT_TABLE_COMPRESSION);
-
-    try {
-      LOG.info("Initializing metrics metadata schema...");
-      conn = hBaseAccessor.getConnectionRetryingOnException();
-      stmt = conn.createStatement();
-
-      // Metadata
-      String metadataSql = String.format(CREATE_METRICS_METADATA_TABLE_SQL,
-        encoding, compression);
-      stmt.executeUpdate(metadataSql);
-
-      String hostedAppSql = String.format(CREATE_HOSTED_APPS_METADATA_TABLE_SQL,
-        encoding, compression);
-      stmt.executeUpdate(hostedAppSql);
-
-      //Host Instances table
-      String hostedInstancesSql = String.format(CREATE_INSTANCE_HOST_TABLE_SQL,
-        encoding, compression);
-      stmt.executeUpdate(hostedInstancesSql);
-    } catch (SQLException | InterruptedException sql) {
-      LOG.error("Error creating Metrics Schema in HBase using Phoenix.", sql);
-      throw new MetricsSystemInitializationException(
-        "Error creating Metrics Metadata Schema in HBase using Phoenix.", sql);
-    } finally {
-      if (stmt != null) {
-        try {
-          stmt.close();
-        } catch (SQLException e) {
-          // Ignore
-        }
-      }
-      if (conn != null) {
-        try {
-          conn.close();
-        } catch (SQLException e) {
-          // Ignore
-        }
-      }
-    }
-
-      metricMetadataSync = new TimelineMetricMetadataSync(this);
+    metricMetadataSync = new TimelineMetricMetadataSync(this);
     // Schedule the executor to sync to store
     executorService.scheduleWithFixedDelay(metricMetadataSync,
       metricsConf.getInt(METRICS_METADATA_SYNC_INIT_DELAY, 120), // 2 minutes
@@ -393,26 +335,14 @@ public class TimelineMetricMetadataManager {
     for (TimelineMetricMetadataKey key : METADATA_CACHE.keySet()) {
       TimelineMetricMetadata timelineMetricMetadata = METADATA_CACHE.get(key);
       if (timelineMetricMetadata != null && timelineMetricMetadata.getUuid() != null) {
-        uuidKeyMap.put(new TimelineMetricUuid(timelineMetricMetadata.getUuid()), key);
-      }
-    }
-
-    if (!HOSTED_APPS_MAP.isEmpty()) {
-      Map.Entry<String, TimelineMetricHostMetadata> entry = HOSTED_APPS_MAP.entrySet().iterator().next();
-      TimelineMetricHostMetadata timelineMetricHostMetadata = entry.getValue();
-      if (timelineMetricHostMetadata.getUuid() != null  && timelineMetricHostMetadata.getUuid().length == 16) {
-        HOSTNAME_UUID_LENGTH = 16;
-        uuidGenStrategy = new MD5UuidGenStrategy();
-      } else {
-        HOSTNAME_UUID_LENGTH = 4;
-        uuidGenStrategy = new Murmur3HashUuidGenStrategy();
+        uuidKeyMap.put(new String(timelineMetricMetadata.getUuid()), key);
       }
     }
 
     for (String host : HOSTED_APPS_MAP.keySet()) {
       TimelineMetricHostMetadata timelineMetricHostMetadata = HOSTED_APPS_MAP.get(host);
       if (timelineMetricHostMetadata != null && timelineMetricHostMetadata.getUuid() != null) {
-        uuidHostMap.put(new TimelineMetricUuid(timelineMetricHostMetadata.getUuid()), host);
+        uuidHostMap.put(new String(timelineMetricHostMetadata.getUuid()), host);
       }
     }
   }
@@ -424,11 +354,11 @@ public class TimelineMetricMetadataManager {
    */
   private MetricUuidGenStrategy getUuidStrategy(Configuration configuration) {
     String strategy = configuration.get(TIMELINE_METRICS_UUID_GEN_STRATEGY, "");
-    if ("md5".equalsIgnoreCase(strategy)){
-      return new MD5UuidGenStrategy();
+    if ("hash".equalsIgnoreCase(strategy)) {
+      return new HashBasedUuidGenStrategy();
     } else {
       //Default
-      return new Murmur3HashUuidGenStrategy();
+      return new MD5UuidGenStrategy();
     }
   }
 
@@ -449,13 +379,14 @@ public class TimelineMetricMetadataManager {
     }
 
     if (!createIfNotPresent) {
-      LOG.debug("UUID not found for " + hostname + ", createIfNotPresent is false");
+      LOG.warn("UUID not found for " + hostname + ", createIfNotPresent is false");
       return null;
     }
 
     byte[] uuid = uuidGenStrategy.computeUuid(hostname, HOSTNAME_UUID_LENGTH);
-    if (uuidHostMap.containsKey(new TimelineMetricUuid(uuid))) {
-      LOG.error("Duplicate key computed for " + hostname +", Collides with  " + uuidHostMap.get(uuid));
+    String uuidStr = new String(uuid);
+    if (uuidHostMap.containsKey(uuidStr)) {
+      LOG.error("Duplicate key computed for " + hostname +", Collides with  " + uuidHostMap.get(uuidStr));
       return null;
     }
 
@@ -464,7 +395,7 @@ public class TimelineMetricMetadataManager {
       HOSTED_APPS_MAP.put(hostname, timelineMetricHostMetadata);
     }
     timelineMetricHostMetadata.setUuid(uuid);
-    uuidHostMap.put(new TimelineMetricUuid(uuid), hostname);
+    uuidHostMap.put(uuidStr, hostname);
 
     return uuid;
   }
@@ -489,16 +420,17 @@ public class TimelineMetricMetadataManager {
     }
 
     if (!createIfNotPresent) {
-      LOG.debug("UUID not found for " + key + ", createIfNotPresent is false");
+      LOG.warn("UUID not found for " + key + ", createIfNotPresent is false");
       return null;
     }
 
-    byte[] uuidBytes = uuidGenStrategy.computeUuid(timelineClusterMetric, TIMELINE_METRIC_UUID_LENGTH);
+    byte[] uuid = uuidGenStrategy.computeUuid(timelineClusterMetric, TIMELINE_METRIC_UUID_LENGTH);
 
-    TimelineMetricUuid uuid = new TimelineMetricUuid(uuidBytes);
-    if (uuidKeyMap.containsKey(uuid) && !uuidKeyMap.get(uuid).equals(key)) {
-      TimelineMetricMetadataKey collidingKey = uuidKeyMap.get(uuid);
-      LOG.error("Duplicate key " + uuid + " computed for " + timelineClusterMetric + ", Collides with  " + collidingKey);
+    String uuidStr = new String(uuid);
+    if (uuidKeyMap.containsKey(uuidStr) && !uuidKeyMap.get(uuidStr).equals(key)) {
+      TimelineMetricMetadataKey collidingKey = uuidKeyMap.get(uuidStr);
+      LOG.error("Duplicate key " + Arrays.toString(uuid) + " computed for " + timelineClusterMetric
+        + ", Collides with " + collidingKey);
       return null;
     }
 
@@ -510,10 +442,10 @@ public class TimelineMetricMetadataManager {
       METADATA_CACHE.put(key, timelineMetricMetadata);
     }
 
-    timelineMetricMetadata.setUuid(uuid.uuid);
+    timelineMetricMetadata.setUuid(uuid);
     timelineMetricMetadata.setIsPersisted(false);
-    uuidKeyMap.put(uuid, key);
-    return uuid.uuid;
+    uuidKeyMap.put(uuidStr, key);
+    return uuid;
   }
 
   /**
@@ -552,14 +484,14 @@ public class TimelineMetricMetadataManager {
     return metricUuid;
   }
 
-  public String getMetricNameFromUuid(byte[] uuid) {
+  public String getMetricNameFromUuid(byte[]  uuid) {
 
     byte[] metricUuid = uuid;
     if (uuid.length == TIMELINE_METRIC_UUID_LENGTH + HOSTNAME_UUID_LENGTH) {
       metricUuid = ArrayUtils.subarray(uuid, 0, TIMELINE_METRIC_UUID_LENGTH);
     }
 
-    TimelineMetricMetadataKey key = uuidKeyMap.get(new TimelineMetricUuid(metricUuid));
+    TimelineMetricMetadataKey key = uuidKeyMap.get(new String(metricUuid));
     return key != null ? key.getMetricName() : null;
   }
 
@@ -574,11 +506,11 @@ public class TimelineMetricMetadataManager {
     }
 
     if (uuid.length == TIMELINE_METRIC_UUID_LENGTH) {
-      TimelineMetricMetadataKey key = uuidKeyMap.get(new TimelineMetricUuid(uuid));
+      TimelineMetricMetadataKey key = uuidKeyMap.get(new String(uuid));
       return key != null ? new TimelineMetric(key.metricName, null, key.appId, key.instanceId) : null;
     } else {
       byte[] metricUuid = ArrayUtils.subarray(uuid, 0, TIMELINE_METRIC_UUID_LENGTH);
-      TimelineMetricMetadataKey key = uuidKeyMap.get(new TimelineMetricUuid(metricUuid));
+      TimelineMetricMetadataKey key = uuidKeyMap.get(new String(metricUuid));
       if (key == null) {
         LOG.error("TimelineMetricMetadataKey is null for : " + Arrays.toString(uuid));
         return null;
@@ -589,7 +521,7 @@ public class TimelineMetricMetadataManager {
       timelineMetric.setInstanceId(key.instanceId);
 
       byte[] hostUuid = ArrayUtils.subarray(uuid, TIMELINE_METRIC_UUID_LENGTH, HOSTNAME_UUID_LENGTH + TIMELINE_METRIC_UUID_LENGTH);
-      timelineMetric.setHostName(uuidHostMap.get(new TimelineMetricUuid(hostUuid)));
+      timelineMetric.setHostName(uuidHostMap.get(new String(hostUuid)));
       return timelineMetric;
     }
   }
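
The metadata-manager hunks replace TimelineMetricUuid map keys with new String(uuid). The wrapper existed because raw byte arrays use identity hashCode/equals and so never match as HashMap keys; the String workaround restored here is only safe when the charset round-trips every byte sequence. A small demonstration of both behaviors:

    import java.util.HashMap;
    import java.util.Map;

    public class ByteKeySketch {
      public static void main(String[] args) {
        // Arrays hash and compare by identity, so a value-equal copy never hits.
        Map<byte[], String> byArray = new HashMap<>();
        byArray.put(new byte[] {1, 2}, "host1");
        System.out.println(byArray.get(new byte[] {1, 2})); // null

        // The String workaround restored by this revert. Caveat: with a lossy
        // default charset, distinct byte sequences can map to the same String.
        Map<String, String> byString = new HashMap<>();
        byString.put(new String(new byte[] {1, 2}), "host1");
        System.out.println(byString.get(new String(new byte[] {1, 2}))); // host1
      }
    }
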
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/query/PhoenixTransactSQL.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/query/PhoenixTransactSQL.java
index e0cc642..f76933a 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/query/PhoenixTransactSQL.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/query/PhoenixTransactSQL.java
@@ -43,11 +43,8 @@ public class PhoenixTransactSQL {
   /**
    * Create table to store individual metric records.
    */
-
-  public static final String METRICS_RECORD_TABLE_NAME = "METRIC_RECORD_UUID";
-
   public static final String CREATE_METRICS_TABLE_SQL = "CREATE TABLE IF NOT " +
-    "EXISTS " + METRICS_RECORD_TABLE_NAME + " (UUID BINARY(20) NOT NULL, " +
+    "EXISTS METRIC_RECORD_UUID (UUID BINARY(32) NOT NULL, " +
     "SERVER_TIME BIGINT NOT NULL, " +
     "METRIC_SUM DOUBLE, " +
     "METRIC_COUNT UNSIGNED_INT, " +
@@ -86,7 +83,7 @@ public class PhoenixTransactSQL {
 
   public static final String CREATE_METRICS_AGGREGATE_TABLE_SQL =
     "CREATE TABLE IF NOT EXISTS %s " +
-      "(UUID BINARY(20) NOT NULL, " +
+      "(UUID BINARY(32) NOT NULL, " +
       "SERVER_TIME BIGINT NOT NULL, " +
       "METRIC_SUM DOUBLE," +
       "METRIC_COUNT UNSIGNED_INT, " +
@@ -158,7 +155,7 @@ public class PhoenixTransactSQL {
 
   public static final String CREATE_HOSTED_APPS_METADATA_TABLE_SQL =
     "CREATE TABLE IF NOT EXISTS HOSTED_APPS_METADATA_UUID " +
-      "(HOSTNAME VARCHAR, UUID BINARY(4), APP_IDS VARCHAR, " +
+      "(HOSTNAME VARCHAR, UUID BINARY(16), APP_IDS VARCHAR, " +
       "CONSTRAINT pk PRIMARY KEY (HOSTNAME))" +
       "DATA_BLOCK_ENCODING='%s', COMPRESSION='%s'";
 
@@ -168,6 +165,9 @@ public class PhoenixTransactSQL {
       "CONSTRAINT pk PRIMARY KEY (INSTANCE_ID, HOSTNAME))" +
       "DATA_BLOCK_ENCODING='%s', COMPRESSION='%s'";
 
+  public static final String ALTER_METRICS_METADATA_TABLE =
+    "ALTER TABLE METRICS_METADATA_UUID ADD IF NOT EXISTS IS_WHITELISTED BOOLEAN";
+
   ////////////////////////////////
 
   /**
@@ -442,6 +442,8 @@ public class PhoenixTransactSQL {
   public static final String METRICS_CLUSTER_AGGREGATE_DAILY_V1_TABLE_NAME =
     "METRIC_AGGREGATE_DAILY";
 
+  public static final String METRICS_RECORD_TABLE_NAME = "METRIC_RECORD_UUID";
+
   public static final String METRICS_AGGREGATE_MINUTE_TABLE_NAME =
     "METRIC_RECORD_MINUTE_UUID";
   public static final String METRICS_AGGREGATE_HOURLY_TABLE_NAME =
@@ -457,7 +459,7 @@ public class PhoenixTransactSQL {
   public static final String METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME =
     "METRIC_AGGREGATE_DAILY_UUID";
 
-  public static final Pattern PHOENIX_TABLES_REGEX_PATTERN = Pattern.compile("METRIC_.*");
+  public static final Pattern PHOENIX_TABLES_REGEX_PATTERN = Pattern.compile("METRIC_.*_UUID");
 
   public static final String[] PHOENIX_TABLES = {
     METRICS_RECORD_TABLE_NAME,
@@ -467,9 +469,7 @@ public class PhoenixTransactSQL {
     METRICS_CLUSTER_AGGREGATE_TABLE_NAME,
     METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME,
     METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME,
-    METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME,
-    METRIC_TRANSIENT_TABLE_NAME,
-    CONTAINER_METRICS_TABLE_NAME
+    METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME
   };
 
   public static final String DEFAULT_TABLE_COMPRESSION = "SNAPPY";
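
The BINARY widths above simply track the UUID generator: a 16-byte metric hash plus a 16-byte host hash under the MD5 strategy gives the BINARY(32) row key, while the reverted Murmur3 scheme used 16 + 4 = BINARY(20) and a BINARY(4) host column. A sketch of the composition:

    // The precision row key is the metric hash followed by the host hash, so
    // the column widths above are just the sum of the two generator widths.
    public class UuidWidthSketch {
      public static void main(String[] args) {
        byte[] metricUuid = new byte[16]; // TIMELINE_METRIC_UUID_LENGTH
        byte[] hostUuid = new byte[16];   // HOSTNAME_UUID_LENGTH after this revert (4 under Murmur3)
        byte[] rowUuid = new byte[metricUuid.length + hostUuid.length];
        System.arraycopy(metricUuid, 0, rowUuid, 0, metricUuid.length);
        System.arraycopy(hostUuid, 0, rowUuid, metricUuid.length, hostUuid.length);
        System.out.println(rowUuid.length); // 32, hence BINARY(32) in METRIC_RECORD_UUID
      }
    }
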
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java
deleted file mode 100644
index 9418aa4..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.metrics.core.timeline.uuid;
-
-import com.google.common.hash.Hashing;
-import org.apache.ambari.metrics.core.timeline.aggregators.TimelineClusterMetric;
-import org.apache.commons.lang.StringUtils;
-
-public class Murmur3HashUuidGenStrategy implements MetricUuidGenStrategy {
-
-  @Override
-  public byte[] computeUuid(TimelineClusterMetric timelineClusterMetric, int maxLength) {
-
-    String metricString = timelineClusterMetric.getMetricName() + timelineClusterMetric.getAppId();
-    if (StringUtils.isNotEmpty(timelineClusterMetric.getInstanceId())) {
-      metricString += timelineClusterMetric.getInstanceId();
-    }
-    byte[] metricBytes = metricString.getBytes();
-    return Hashing.murmur3_128().hashBytes(metricBytes).asBytes();
-  }
-
-  @Override
-  public byte[] computeUuid(String value, int maxLength) {
-    byte[] valueBytes = value.getBytes();
-    return Hashing.murmur3_32().hashBytes(valueBytes).asBytes();
-  }
-}
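
The deleted strategy hashed metricName + appId (+ instanceId) with Guava's 128-bit Murmur3 and hostnames with the 32-bit variant, ignoring its maxLength argument in both cases. A stand-alone illustration of the two widths, assuming Guava on the classpath; the metric and host names are made up:

    import com.google.common.hash.Hashing;
    import java.nio.charset.StandardCharsets;

    public class Murmur3WidthSketch {
      public static void main(String[] args) {
        byte[] metricUuid = Hashing.murmur3_128()
            .hashString("regionserver.Server.readRequestCount" + "hbase", StandardCharsets.UTF_8)
            .asBytes(); // always 16 bytes
        byte[] hostUuid = Hashing.murmur3_32()
            .hashString("host1.example.com", StandardCharsets.UTF_8)
            .asBytes(); // always 4 bytes
        System.out.println(metricUuid.length + " / " + hostUuid.length); // 16 / 4
      }
    }
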
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuid.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuid.java
deleted file mode 100644
index 7907ff6..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuid.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.metrics.core.timeline.uuid;
-
-import java.util.Arrays;
-
-public class TimelineMetricUuid {
-  public byte[] uuid;
-
-  public TimelineMetricUuid(byte[] uuid) {
-    this.uuid = uuid;
-  }
-
-  @Override
-  public int hashCode() {
-    return Arrays.hashCode(uuid);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    TimelineMetricUuid that = (TimelineMetricUuid) o;
-
-    return Arrays.equals(this.uuid, that.uuid);
-  }
-
-  @Override
-  public String toString() {
-    return Arrays.toString(uuid);
-  }
-}
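
The wrapper exists purely so byte-array UUIDs can key the metadata maps. A quick check of that property, assuming the class above is on the classpath:

    import java.util.HashMap;
    import java.util.Map;

    public class UuidKeySketch {
      public static void main(String[] args) {
        Map<TimelineMetricUuid, String> map = new HashMap<>();
        map.put(new TimelineMetricUuid(new byte[] {7, 7}), "cpu_user");
        // Hits because hashCode/equals delegate to Arrays.hashCode/Arrays.equals.
        System.out.println(map.get(new TimelineMetricUuid(new byte[] {7, 7})));
      }
    }
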
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/webapp/TimelineWebServices.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/webapp/TimelineWebServices.java
index 3bcbaf6..9c88b1a 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/webapp/TimelineWebServices.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/webapp/TimelineWebServices.java
@@ -144,8 +144,8 @@ public class TimelineWebServices {
       // TODO: Check ACLs for MetricEntity using the TimelineACLManager.
       // TODO: Save owner of the MetricEntity.
 
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Storing metrics: " +
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Storing metrics: " +
           TimelineUtils.dumpTimelineRecordtoJSON(metrics, true));
       }
 
@@ -175,8 +175,8 @@ public class TimelineWebServices {
     }
 
     try {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Storing aggregated metrics: " +
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Storing aggregated metrics: " +
                 TimelineUtils.dumpTimelineRecordtoJSON(metrics, true));
       }
 
@@ -200,8 +200,8 @@ public class TimelineWebServices {
     }
 
     try {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Storing container metrics: " + TimelineUtils
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Storing container metrics: " + TimelineUtils
             .dumpTimelineRecordtoJSON(metrics, true));
       }
 
@@ -250,8 +250,8 @@ public class TimelineWebServices {
   ) {
     init(res);
     try {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Request for metrics => metricNames: " + metricNames + ", " +
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Request for metrics => metricNames: " + metricNames + ", " +
           "appId: " + appId + ", instanceId: " + instanceId + ", " +
           "hostname: " + hostname + ", startTime: " + startTime + ", " +
           "endTime: " + endTime + ", " +
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/AMSSMOKETESTFAKE.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/AMSSMOKETESTFAKE.dat
deleted file mode 100644
index f5c181a..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/AMSSMOKETESTFAKE.dat
+++ /dev/null
@@ -1 +0,0 @@
-AMBARI_METRICS.SmokeTest.FakeMetric
\ No newline at end of file
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/HBASE_MASTER.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/MASTER_HBASE.dat
similarity index 100%
rename from ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/HBASE_MASTER.dat
rename to ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/MASTER_HBASE.dat
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/HBASE_REGIONSERVER.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/SLAVE_HBASE.dat
similarity index 100%
rename from ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/HBASE_REGIONSERVER.dat
rename to ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/SLAVE_HBASE.dat
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/TIMELINE_METRIC_STORE_WATCHER.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/TIMELINE_METRIC_STORE_WATCHER.dat
deleted file mode 100644
index af73d02..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/TIMELINE_METRIC_STORE_WATCHER.dat
+++ /dev/null
@@ -1 +0,0 @@
-TimelineMetricStoreWatcher.FakeMetric
\ No newline at end of file
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/AbstractMiniHBaseClusterTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/AbstractMiniHBaseClusterTest.java
index 26078cb..258054c 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/AbstractMiniHBaseClusterTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/AbstractMiniHBaseClusterTest.java
@@ -18,7 +18,6 @@
 package org.apache.ambari.metrics.core.timeline;
 
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.OUT_OFF_BAND_DATA_TIME_ALLOWANCE;
-import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.UPSERT_METRICS_SQL;
 import static org.apache.phoenix.end2end.ParallelStatsDisabledIT.tearDownMiniCluster;
@@ -101,12 +100,8 @@ public abstract class AbstractMiniHBaseClusterTest extends BaseTest {
     // inits connection, starts mini cluster
     conn = getConnection(getUrl());
 
-    Configuration metricsConf = new Configuration();
-    metricsConf.set(TimelineMetricConfiguration.HBASE_COMPRESSION_SCHEME, "NONE");
-
-    metadataManager = new TimelineMetricMetadataManager(metricsConf, hdb);
-    metadataManager.initializeMetadata();
     hdb.initMetricSchema();
+    metadataManager = new TimelineMetricMetadataManager(new Configuration(), hdb);
     hdb.setMetadataInstance(metadataManager);
   }
 
@@ -211,8 +206,6 @@ public abstract class AbstractMiniHBaseClusterTest extends BaseTest {
     metricsConf.set("timeline.metrics.transient.metric.patterns", "topology%");
     // Unit tests insert values into the future
     metricsConf.setLong(OUT_OFF_BAND_DATA_TIME_ALLOWANCE, 600000);
-    metricsConf.set("timeline.metrics." + METRICS_RECORD_TABLE_NAME + ".durability", "SKIP_WAL");
-    metricsConf.set("timeline.metrics." + METRICS_CLUSTER_AGGREGATE_TABLE_NAME + ".durability", "ASYNC_WAL");
 
     return
       new PhoenixHBaseAccessor(new TimelineMetricConfiguration(new Configuration(), metricsConf),
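
The removed lines configured WAL durability per table through keys of the form timeline.metrics.<TABLE_NAME>.durability. A sketch of that knob, using the two table names and values from the lines above:

    import org.apache.hadoop.conf.Configuration;

    public class DurabilityConfSketch {
      public static void main(String[] args) {
        Configuration metricsConf = new Configuration();
        // Key pattern: timeline.metrics.<TABLE_NAME>.durability
        metricsConf.set("timeline.metrics.METRIC_RECORD_UUID.durability", "SKIP_WAL");
        metricsConf.set("timeline.metrics.METRIC_AGGREGATE_UUID.durability", "ASYNC_WAL");
        System.out.println(metricsConf.get("timeline.metrics.METRIC_RECORD_UUID.durability"));
      }
    }
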
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/ITPhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/ITPhoenixHBaseAccessor.java
index 20fbc58..65b5a1b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/ITPhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/ITPhoenixHBaseAccessor.java
@@ -19,13 +19,11 @@ package org.apache.ambari.metrics.core.timeline;
 
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertTrue;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.DATE_TIERED_COMPACTION_POLICY;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.FIFO_COMPACTION_POLICY_CLASS;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HSTORE_COMPACTION_CLASS_KEY;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HSTORE_ENGINE_CLASS;
-import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CONTAINER_METRICS_TABLE_NAME;
+import static org.apache.ambari.metrics.core.timeline.PhoenixHBaseAccessor.DATE_TIERED_COMPACTION_POLICY;
+import static org.apache.ambari.metrics.core.timeline.PhoenixHBaseAccessor.FIFO_COMPACTION_POLICY_CLASS;
+import static org.apache.ambari.metrics.core.timeline.PhoenixHBaseAccessor.HSTORE_COMPACTION_CLASS_KEY;
+import static org.apache.ambari.metrics.core.timeline.PhoenixHBaseAccessor.HSTORE_ENGINE_CLASS;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
-import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.PHOENIX_TABLES;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.PHOENIX_TABLES_REGEX_PATTERN;
@@ -44,7 +42,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 
-import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -370,30 +367,28 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
     precisionValues.put(METRICS_RECORD_TABLE_NAME, precisionTtl);
     f.set(hdb, precisionValues);
 
+    Field f2 = PhoenixHBaseAccessor.class.getDeclaredField("timelineMetricsTablesDurability");
+    f2.setAccessible(true);
+    f2.set(hdb, "ASYNC_WAL");
+
     hdb.initPoliciesAndTTL();
 
     // Verify expected policies are set
     boolean normalizerEnabled = false;
     String precisionTableCompactionPolicy = null;
     String aggregateTableCompactionPolicy = null;
-    boolean precisionTableDurabilitySet  = false;
-    boolean aggregateTableDurabilitySet  = false;
-
-    boolean isComplete = false;
-
-    for (int i = 0; i < 10 && !isComplete; i++) {
+    boolean tableDurabilitySet  = false;
+    for (int i = 0; i < 10; i++) {
       LOG.warn("Policy check retry : " + i);
       for (String tableName : PHOENIX_TABLES) {
         TableName[] tableNames = hBaseAdmin.listTableNames(PHOENIX_TABLES_REGEX_PATTERN, false);
-        TableName[] containerMetricsTableName = hBaseAdmin.listTableNames(CONTAINER_METRICS_TABLE_NAME, false);
-        tableNames = (TableName[]) ArrayUtils.addAll(tableNames, containerMetricsTableName);
-
         Optional<TableName> tableNameOptional = Arrays.stream(tableNames)
           .filter(t -> tableName.equals(t.getNameAsString())).findFirst();
 
         TableDescriptor tableDescriptor = hBaseAdmin.getTableDescriptor(tableNameOptional.get());
         
         normalizerEnabled = tableDescriptor.isNormalizationEnabled();
+        tableDurabilitySet = (Durability.ASYNC_WAL.equals(tableDescriptor.getDurability()));
         if (tableName.equals(METRICS_RECORD_TABLE_NAME)) {
           precisionTableCompactionPolicy = tableDescriptor.getValue(HSTORE_COMPACTION_CLASS_KEY);
         } else {
@@ -403,25 +398,17 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
         // Best effort for 20 seconds
         if (normalizerEnabled || (precisionTableCompactionPolicy == null && aggregateTableCompactionPolicy == null)) {
           Thread.sleep(2000l);
-        } else {
-          isComplete = true;
         }
         if (tableName.equals(METRICS_RECORD_TABLE_NAME)) {
-          precisionTableDurabilitySet = (Durability.SKIP_WAL.equals(tableDescriptor.getDurability()));
           for (ColumnFamilyDescriptor family : tableDescriptor.getColumnFamilies()) {
             precisionTtl = family.getTimeToLive();
           }
         }
-
-        if (tableName.equals(METRICS_CLUSTER_AGGREGATE_TABLE_NAME)) {
-          aggregateTableDurabilitySet = (Durability.ASYNC_WAL.equals(tableDescriptor.getDurability()));
-        }
       }
     }
 
     Assert.assertFalse("Normalizer disabled.", normalizerEnabled);
-    Assert.assertTrue("METRIC_RECORD_UUID Durability Set.", precisionTableDurabilitySet);
-    Assert.assertTrue("METRIC_AGGREGATE_UUID Durability Set.", aggregateTableDurabilitySet);
+    Assert.assertTrue("Durability Set.", tableDurabilitySet);
     Assert.assertEquals("FIFO compaction policy is set for METRIC_RECORD_UUID.", FIFO_COMPACTION_POLICY_CLASS, precisionTableCompactionPolicy);
     Assert.assertEquals("FIFO compaction policy is set for aggregate tables", DATE_TIERED_COMPACTION_POLICY, aggregateTableCompactionPolicy);
     Assert.assertEquals("Precision TTL value as expected.", 86400, precisionTtl);
@@ -454,7 +441,7 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
     metric.setExitCode(0);
     List<ContainerMetric> list = Arrays.asList(metric);
     hdb.insertContainerMetrics(list);
-    PreparedStatement stmt = conn.prepareStatement("SELECT * FROM CONTAINER_METRICS");
+    PreparedStatement stmt = conn.prepareStatement("SELECT * FROM CONTAINER_METRICS_UUID");
     ResultSet set = stmt.executeQuery();
     // check each field is set properly when read back.
     boolean foundRecord = false;
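
The test reaches into PhoenixHBaseAccessor's private state with reflection to preseed the durability setting before initPoliciesAndTTL() runs. The same trick in miniature, with Target and secret as illustrative names:

    import java.lang.reflect.Field;

    public class PrivateFieldSketch {
      static class Target { private String secret = "old"; }

      public static void main(String[] args) throws Exception {
        Target t = new Target();
        Field f = Target.class.getDeclaredField("secret");
        f.setAccessible(true);  // bypass the access check
        f.set(t, "ASYNC_WAL");  // same idea as seeding timelineMetricsTablesDurability
        System.out.println(f.get(t));
      }
    }
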
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessorTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessorTest.java
index 63ec59e..9d1b2a4 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessorTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessorTest.java
@@ -109,7 +109,7 @@ public class PhoenixHBaseAccessorTest {
 
     mockStatic(PhoenixTransactSQL.class);
     PreparedStatement preparedStatementMock = EasyMock.createNiceMock(PreparedStatement.class);
-    Condition condition = new DefaultCondition(Collections.singletonList(new byte[20]), metricNames, hostnames, "appid", "instanceid", 123L, 234L, Precision.SECONDS, 10, true);
+    Condition condition = new DefaultCondition(Collections.singletonList(new byte[32]), metricNames, hostnames, "appid", "instanceid", 123L, 234L, Precision.SECONDS, 10, true);
     expect(PhoenixTransactSQL.prepareGetMetricsSqlStmt(null, condition)).andReturn(preparedStatementMock).once();
     ResultSet rsMock = EasyMock.createNiceMock(ResultSet.class);
     expect(preparedStatementMock.executeQuery()).andReturn(rsMock);
@@ -138,7 +138,7 @@ public class PhoenixHBaseAccessorTest {
 
     mockStatic(PhoenixTransactSQL.class);
     PreparedStatement preparedStatementMock = EasyMock.createNiceMock(PreparedStatement.class);
-    Condition condition = new DefaultCondition(Collections.singletonList(new byte[20]), metricNames, hostnames, "appid", "instanceid", 123L, 234L, Precision.SECONDS, 10, true);
+    Condition condition = new DefaultCondition(Collections.singletonList(new byte[32]), metricNames, hostnames, "appid", "instanceid", 123L, 234L, Precision.SECONDS, 10, true);
     expect(PhoenixTransactSQL.prepareGetMetricsSqlStmt(null, condition)).andReturn(preparedStatementMock).once();
     ResultSet rsMock = EasyMock.createNiceMock(ResultSet.class);
     RuntimeException runtimeException = EasyMock.createNiceMock(RuntimeException.class);
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java
deleted file mode 100644
index 4d663cc..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.metrics.core.timeline;
-
-import org.apache.ambari.metrics.core.timeline.aggregators.TimelineClusterMetric;
-import org.apache.ambari.metrics.core.timeline.discovery.TimelineMetricMetadataManager;
-import org.apache.hadoop.conf.Configuration;
-import org.easymock.EasyMock;
-import org.junit.Assert;
-import org.junit.Test;
-
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS;
-import static org.easymock.EasyMock.anyBoolean;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-
-
-public class TimelineMetricSplitPointComputerTest {
-
-  @Test
-  public void testSplitPointComputationForBasicCluster() {
-
-    /**
-     *  HBase Total heap = 1G.
-     *  HDFS,HBASE,YARN services deployed.
-     */
-    Configuration metricsConfMock = EasyMock.createMock(Configuration.class);
-
-    expect(metricsConfMock.get(TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS, "")).
-      andReturn("METRICS_COLLECTOR,AMBARI_SERVER,NAMENODE,RESOURCEMANAGER").once();
-    expect(metricsConfMock.get(TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS, "")).
-      andReturn("METRICS_MONITOR,DATANODE,NODEMANAGER,HBASE_REGIONSERVER").once();
-    expect(metricsConfMock.getDouble("hbase_total_heapsize", 1024*1024*1024)).andReturn(1024 * 1024 * 1024.0).once();
-
-    Configuration hbaseConfMock = EasyMock.createMock(Configuration.class);
-    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5)).andReturn(0.5).once();
-    expect(hbaseConfMock.getDouble("hbase.hregion.memstore.flush.size", 134217728)).andReturn(134217728.0).once();
-
-    TimelineMetricMetadataManager metricMetadataManagerMock = EasyMock.createNiceMock(TimelineMetricMetadataManager.class);
-    expect(metricMetadataManagerMock.getUuid(anyObject(TimelineClusterMetric.class), anyBoolean())).andReturn(new byte[16]);
-
-    replay(metricsConfMock, hbaseConfMock, metricMetadataManagerMock);
-
-    TimelineMetricSplitPointComputer timelineMetricSplitPointComputer = new TimelineMetricSplitPointComputer(metricsConfMock,
-      hbaseConfMock,
-      metricMetadataManagerMock);
-
-    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 3);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 1);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 1);
-  }
-
-  @Test
-  public void testSplitPointComputationForMediumCluster() {
-
-    /**
-     *  HBase Total heap = 8G.
-     *  All services deployed.
-     */
-    Configuration metricsConfMock = EasyMock.createMock(Configuration.class);
-
-    expect(metricsConfMock.get(TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS, "")).
-      andReturn("METRICS_COLLECTOR,AMBARI_SERVER,NAMENODE,RESOURCEMANAGER," +
-        "NIMBUS,HIVESERVER2,HIVEMETASTORE,HBASE_MASTER,KAFKA_BROKER").once();
-    expect(metricsConfMock.get(TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS, "")).
-      andReturn("METRICS_MONITOR,DATANODE,NODEMANAGER,HBASE_REGIONSERVER").once();
-    expect(metricsConfMock.getDouble("hbase_total_heapsize", 1024*1024*1024)).andReturn(8589934592.0).once();
-
-    Configuration hbaseConfMock = EasyMock.createMock(Configuration.class);
-    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5)).andReturn(0.5).once();
-    expect(hbaseConfMock.getDouble("hbase.hregion.memstore.flush.size", 134217728)).andReturn(134217728.0).once();
-
-    TimelineMetricMetadataManager metricMetadataManagerMock = EasyMock.createNiceMock(TimelineMetricMetadataManager.class);
-    expect(metricMetadataManagerMock.getUuid(anyObject(TimelineClusterMetric.class), anyBoolean())).andReturn(new byte[16]);
-
-    replay(metricsConfMock, hbaseConfMock, metricMetadataManagerMock);
-
-    TimelineMetricSplitPointComputer timelineMetricSplitPointComputer = new TimelineMetricSplitPointComputer(metricsConfMock,
-      hbaseConfMock,
-      metricMetadataManagerMock);
-
-    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 16);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 3);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 3);
-  }
-
-  @Test
-  public void testSplitPointComputationForLargeCluster() {
-
-    /**
-     *  HBase Total heap = 24G.
-     *  All services deployed.
-     */
-    Configuration metricsConfMock = EasyMock.createMock(Configuration.class);
-
-    expect(metricsConfMock.get(TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS, "")).
-      andReturn("METRICS_COLLECTOR,AMBARI_SERVER,NAMENODE,RESOURCEMANAGER," +
-        "NIMBUS,HIVESERVER2,HIVEMETASTORE,HBASE_MASTER,KAFKA_BROKER").once();
-    expect(metricsConfMock.get(TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS, "")).
-      andReturn("METRICS_MONITOR,DATANODE,NODEMANAGER,HBASE_REGIONSERVER").once();
-    expect(metricsConfMock.getDouble("hbase_total_heapsize", 1024*1024*1024)).andReturn(24 * 1024 * 1024 * 1024.0).once();
-
-    Configuration hbaseConfMock = EasyMock.createMock(Configuration.class);
-    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5)).andReturn(0.5).once();
-    expect(hbaseConfMock.getDouble("hbase.hregion.memstore.flush.size", 134217728)).andReturn(2 * 134217728.0).once();
-
-    TimelineMetricMetadataManager metricMetadataManagerMock = EasyMock.createNiceMock(TimelineMetricMetadataManager.class);
-    expect(metricMetadataManagerMock.getUuid(anyObject(TimelineClusterMetric.class), anyBoolean())).andReturn(new byte[16]);
-
-    replay(metricsConfMock, hbaseConfMock, metricMetadataManagerMock);
-
-    TimelineMetricSplitPointComputer timelineMetricSplitPointComputer = new TimelineMetricSplitPointComputer(metricsConfMock,
-      hbaseConfMock,
-      metricMetadataManagerMock);
-
-    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 28);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 6);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 6);
-  }
-}
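
The deleted tests follow the usual EasyMock record/replay cycle: record expectations on the mock, replay, exercise, then verify. A bare-bones version against a mocked Hadoop Configuration, mirroring the tests above; the key and value are placeholders:

    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;

    import org.apache.hadoop.conf.Configuration;

    public class EasyMockCycleSketch {
      public static void main(String[] args) {
        Configuration conf = createMock(Configuration.class);
        expect(conf.get("some.key", "")).andReturn("value").once(); // record
        replay(conf);                                               // switch to replay mode
        System.out.println(conf.get("some.key", ""));               // exercise the mock
        verify(conf);                                               // all expectations were met
      }
    }
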
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcherTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcherTest.java
index eb64198..de0236c 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcherTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcherTest.java
@@ -48,10 +48,10 @@ public class TimelineMetricStoreWatcherTest {
 
   @Test
   public void testRunPositive() throws Exception {
-    HBaseTimelineMetricsService metricStore = createNiceMock(HBaseTimelineMetricsService.class);
+    TimelineMetricStore metricStore = createNiceMock(TimelineMetricStore.class);
 
-    metricStore.putMetricsSkipCache(anyObject(TimelineMetrics.class));
-    expectLastCall().once();
+    expect(metricStore.putMetrics(anyObject(TimelineMetrics.class)))
+      .andReturn(new TimelinePutResponse());
 
     // metric found
     expect(metricStore.getTimelineMetrics(EasyMock.<List<String>>anyObject(),
@@ -75,7 +75,7 @@ public class TimelineMetricStoreWatcherTest {
 
   @Test
   public void testRunNegative() throws Exception {
-    HBaseTimelineMetricsService metricStore = createNiceMock(HBaseTimelineMetricsService.class);
+    TimelineMetricStore metricStore = createNiceMock(TimelineMetricStore.class);
 
     expect(metricStore.putMetrics(anyObject(TimelineMetrics.class)))
       .andReturn(new TimelinePutResponse());
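
The hunks above swap the watcher test back from mocking the concrete HBaseTimelineMetricsService to mocking the TimelineMetricStore interface. The two EasyMock recording styles differ: a void method such as putMetricsSkipCache is recorded by invoking it and then calling expectLastCall(), while a value-returning method is stubbed with expect(...).andReturn(...). A minimal, self-contained sketch of both styles, using an illustrative interface rather than the real store:

    import static org.easymock.EasyMock.*;

    interface Store {
      String put(String metric);          // value-returning: expect(...).andReturn(...)
      void putSkipCache(String metric);   // void: invoke, then expectLastCall()
    }

    public class MockRecordingSketch {
      public static void main(String[] args) {
        Store store = createNiceMock(Store.class);
        expect(store.put(anyString())).andReturn("ok");
        store.putSkipCache("bytes_in");
        expectLastCall().once();
        replay(store);
        System.out.println(store.put("bytes_in"));  // prints "ok"
        store.putSkipCache("bytes_in");
        verify(store);
      }
    }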
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/discovery/TestMetadataManager.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/discovery/TestMetadataManager.java
index 28bb75e..2f2b0b5 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/discovery/TestMetadataManager.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/discovery/TestMetadataManager.java
@@ -160,7 +160,7 @@ public class TestMetadataManager extends AbstractMiniHBaseClusterTest {
 
     byte[] uuid = metadataManager.getUuid(timelineMetric, true);
     Assert.assertNotNull(uuid);
-    Assert.assertEquals(uuid.length, 20);
+    Assert.assertEquals(uuid.length, 32);
 
     byte[] uuidWithoutHost = metadataManager.getUuid(new TimelineClusterMetric(timelineMetric.getMetricName(), timelineMetric.getAppId(), timelineMetric.getInstanceId(), -1), true);
     Assert.assertNotNull(uuidWithoutHost);
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/MetricUuidGenStrategyTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuidManagerTest.java
similarity index 59%
rename from ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/MetricUuidGenStrategyTest.java
rename to ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuidManagerTest.java
index a25310b..e4018bb 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/MetricUuidGenStrategyTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuidManagerTest.java
@@ -20,7 +20,6 @@ package org.apache.ambari.metrics.core.timeline.uuid;
 
 import org.apache.ambari.metrics.core.timeline.aggregators.TimelineClusterMetric;
 import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
@@ -38,30 +37,39 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-public class MetricUuidGenStrategyTest {
+public class TimelineMetricUuidManagerTest {
 
 
-  private static List<String> apps = Arrays.asList("namenode",
-    "datanode", "hbase_master", "hbase_regionserver", "kafka_broker", "nimbus", "ams-hbase",
+  private List<String> apps = Arrays.asList("namenode",
+    "datanode", "master_hbase", "slave_hbase", "kafka_broker", "nimbus", "ams-hbase",
     "accumulo", "nodemanager", "resourcemanager", "ambari_server", "HOST", "timeline_metric_store_watcher",
-    "jobhistoryserver", "hiveserver2", "hivemetastore", "applicationhistoryserver", "amssmoketestfake", "llapdaemon");
+    "jobhistoryserver", "hiveserver2", "hivemetastore", "applicationhistoryserver", "amssmoketestfake");
 
-  private static Map<String, Set<String>> metricSet  = new HashMap<>();
-
-  @BeforeClass
-  public static void init() {
-    metricSet  = new HashMap<>(populateMetricWhitelistFromFile());
-  }
+  private Map<String, Set<String>> metricSet  = new HashMap<>(populateMetricWhitelistFromFile());
 
   @Test
-  @Ignore
-  public void testHashBasedUuid() throws SQLException {
-    testMetricCollisionsForUuidGenStrategy(new HashBasedUuidGenStrategy(), 16);
+  @Ignore("Collisions possible")
+  public void testHashBasedUuidForMetricName() throws SQLException {
+
+    MetricUuidGenStrategy strategy = new HashBasedUuidGenStrategy();
+    Map<String, TimelineClusterMetric> uuids = new HashMap<>();
+    for (String app : metricSet.keySet()) {
+      Set<String> metrics = metricSet.get(app);
+      for (String metric : metrics) {
+        TimelineClusterMetric timelineClusterMetric = new TimelineClusterMetric(metric, app, null, -1l);
+        byte[] uuid = strategy.computeUuid(timelineClusterMetric, 16);
+        Assert.assertNotNull(uuid);
+        Assert.assertTrue(uuid.length == 16);
+        String uuidStr = new String(uuid);
+        Assert.assertFalse(uuids.containsKey(uuidStr) && !uuids.containsValue(timelineClusterMetric));
+        uuids.put(uuidStr, timelineClusterMetric);
+      }
+    }
   }
 
   @Test
-  @Ignore
-  public void testHashBasedUuidForAppIds() throws SQLException {
+  public void testHashBasedUuidForAppIds() throws SQLException {
+
     MetricUuidGenStrategy strategy = new HashBasedUuidGenStrategy();
     Map<String, TimelineClusterMetric> uuids = new HashMap<>();
     for (String app : metricSet.keySet()) {
@@ -74,64 +82,53 @@ public class MetricUuidGenStrategyTest {
   }
 
   @Test
-  @Ignore
   public void testHashBasedUuidForHostnames() throws SQLException {
-    testHostCollisionsForUuidGenStrategy(new HashBasedUuidGenStrategy(), 16);
-  }
-
-
-  @Test
-  public void testMD5BasedUuid() throws SQLException {
-    testMetricCollisionsForUuidGenStrategy(new MD5UuidGenStrategy(), 16);
-
-  }
-
-  @Test
-  public void testMD5BasedUuidForHostnames() throws SQLException {
-    testHostCollisionsForUuidGenStrategy(new MD5UuidGenStrategy(), 16);
-  }
 
+    MetricUuidGenStrategy strategy = new HashBasedUuidGenStrategy();
+    Map<String, String> uuids = new HashMap<>();
 
-  @Test
-  public void testMD5ConsistentHashing() throws SQLException, InterruptedException {
-    testConsistencyForUuidGenStrategy(new MD5UuidGenStrategy(), 16);
-  }
+    List<String> hosts = new ArrayList<>();
+    String hostPrefix = "TestHost.";
+    String hostSuffix = ".ambari.apache.org";
 
+    for (int i=0; i<=2000; i++) {
+      hosts.add(hostPrefix + i + hostSuffix);
+    }
 
-  @Test
-  public void testMurmur3HashUuid() throws SQLException {
-    testMetricCollisionsForUuidGenStrategy(new Murmur3HashUuidGenStrategy(), 16);
+    for (String host : hosts) {
+      byte[] uuid = strategy.computeUuid(host, 16);
+      Assert.assertNotNull(uuid);
+      Assert.assertTrue(uuid.length == 16);
+      String uuidStr = new String(uuid);
+      Assert.assertFalse(uuids.containsKey(uuidStr));
+      uuids.put(uuidStr, host);
+    }
   }
 
-  @Test
-  public void testMurmur3HashingBasedUuidForHostnames() throws SQLException {
-    testHostCollisionsForUuidGenStrategy(new Murmur3HashUuidGenStrategy(), 4);
-  }
 
   @Test
-  public void testMurmur3ConsistentHashing() throws SQLException, InterruptedException {
-    testConsistencyForUuidGenStrategy(new Murmur3HashUuidGenStrategy(), 4);
-  }
+  public void testRandomUuidForWhitelistedMetrics() throws SQLException {
 
-  private void testMetricCollisionsForUuidGenStrategy(MetricUuidGenStrategy strategy, int uuidLength) {
-    Map<TimelineMetricUuid, TimelineClusterMetric> uuids = new HashMap<>();
+    MetricUuidGenStrategy strategy = new MD5UuidGenStrategy();
+    Map<String, String> uuids = new HashMap<>();
     for (String app : metricSet.keySet()) {
       Set<String> metrics = metricSet.get(app);
-      for (String m : metrics) {
-        TimelineClusterMetric metric = new TimelineClusterMetric(m, app, null, -1l);
-        byte[] uuid = strategy.computeUuid(metric, uuidLength);
+      for (String metric : metrics) {
+        byte[] uuid = strategy.computeUuid(new TimelineClusterMetric(metric, app, null, -1l), 16);
         Assert.assertNotNull(uuid);
-        Assert.assertTrue(uuid.length == uuidLength);
-        TimelineMetricUuid uuidStr = new TimelineMetricUuid(uuid);
+        Assert.assertTrue(uuid.length == 16);
+        String uuidStr = new String(uuid);
         Assert.assertFalse(uuids.containsKey(uuidStr) && !uuids.containsValue(metric));
         uuids.put(uuidStr, metric);
       }
     }
   }
 
+  @Test
+  public void testRandomUuidForHostnames() throws SQLException {
 
-  private void testHostCollisionsForUuidGenStrategy(MetricUuidGenStrategy strategy, int uuidLength) {
-    Map<TimelineMetricUuid, String> uuids = new HashMap<>();
+    MetricUuidGenStrategy strategy = new MD5UuidGenStrategy();
+    Map<String, String> uuids = new HashMap<>();
 
     List<String> hosts = new ArrayList<>();
     String hostPrefix = "TestHost.";
@@ -141,33 +138,40 @@ public class MetricUuidGenStrategyTest {
       hosts.add(hostPrefix + i + hostSuffix);
     }
 
+    int numC = 0;
     for (String host : hosts) {
-      byte[] uuid = strategy.computeUuid(host, uuidLength);
+      byte[] uuid = strategy.computeUuid(host, 16);
       Assert.assertNotNull(uuid);
-      Assert.assertTrue(uuid.length == uuidLength);
-      TimelineMetricUuid uuidStr = new TimelineMetricUuid(uuid);
+      Assert.assertTrue(uuid.length == 16);
+      String uuidStr = new String(uuid);
       Assert.assertFalse(uuids.containsKey(uuidStr));
       uuids.put(uuidStr, host);
     }
   }
 
-  private void testConsistencyForUuidGenStrategy(MetricUuidGenStrategy strategy, int length) throws InterruptedException {
+
+  @Test
+  public void testConsistentHashing() throws SQLException, InterruptedException {
+
+    MetricUuidGenStrategy strategy = new MD5UuidGenStrategy();
     String key = "TestString";
 
-    byte[] uuid = strategy.computeUuid(key, length);
+    byte[] uuid = strategy.computeUuid(key, 16);
     Assert.assertNotNull(uuid);
-    Assert.assertTrue(uuid.length == length);
+    Assert.assertTrue(uuid.length == 16);
 
     for (int i = 0; i<100; i++) {
-      byte[] uuid2 = strategy.computeUuid(key, length);
+      byte[] uuid2 = strategy.computeUuid(key, 16);
       Assert.assertNotNull(uuid2);
-      Assert.assertTrue(uuid2.length == length);
+      Assert.assertTrue(uuid2.length == 16);
       Assert.assertArrayEquals(uuid, uuid2);
       Thread.sleep(10);
     }
   }
 
-  private static Map<String, Set<String>> populateMetricWhitelistFromFile() {
+
+  public Map<String, Set<String>> populateMetricWhitelistFromFile() {
+
 
     Map<String, Set<String>> metricSet = new HashMap<String, Set<String>>();
     FileInputStream fstream = null;
@@ -203,7 +207,7 @@ public class MetricUuidGenStrategyTest {
         }
       }
       metricsForApp.add("live_hosts");
-      if (appId.startsWith("hbase")) {
+      if (appId.equals("master_hbase") || appId.equals("slave_hbase")) {
         hbaseMetrics.addAll(metricsForApp);
       } else {
         metricSet.put(appId, metricsForApp);
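
The renamed test class keeps exercising MD5-based UUID generation for metric names and hostnames: the strategy must be deterministic (testConsistentHashing) and collision-free over the whitelist at 16 bytes. Assuming MD5UuidGenStrategy simply truncates an MD5 digest of the key, which these tests are consistent with but the hunks above do not show, a self-contained sketch:

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.util.Arrays;

    public class Md5UuidSketch {
      // MD5 is deterministic, so repeated calls yield identical bytes, and its
      // 16-byte digest matches the length the tests assert.
      static byte[] computeUuid(String key, int length) throws Exception {
        byte[] digest = MessageDigest.getInstance("MD5")
            .digest(key.getBytes(StandardCharsets.UTF_8));
        return Arrays.copyOf(digest, length);
      }

      public static void main(String[] args) throws Exception {
        byte[] a = computeUuid("TestString", 16);
        byte[] b = computeUuid("TestString", 16);
        System.out.println(Arrays.equals(a, b));  // true, as testConsistentHashing expects
      }
    }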
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog270.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog270.java
index a111764..b7a55ae 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog270.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog270.java
@@ -1601,25 +1601,10 @@ public class UpgradeCatalog270 extends AbstractUpgradeCatalog {
     if (clusters != null) {
       Map<String, Cluster> clusterMap = clusters.getClusters();
 
+      ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
       if (clusterMap != null && !clusterMap.isEmpty()) {
         for (final Cluster cluster : clusterMap.values()) {
-          Map<String, String> newProperties = new HashMap<>();
-          newProperties.put("timeline.metrics.service.default.result.limit", "5760");
-
-          Config config = cluster.getDesiredConfigByType("ams-site");
-          if (config != null) {
-            Map<String, String> oldAmsSite = config.getProperties();
-            if (MapUtils.isNotEmpty(oldAmsSite)) {
-              int oldTtl = Integer.parseInt(oldAmsSite.get("timeline.container-metrics.ttl"));
-              if (oldTtl > 14 * 86400) {
-                newProperties.put("timeline.container-metrics.ttl", "1209600");
-              }
-            }
-          }
-          Set<String> removeProperties = Sets.newHashSet("timeline.metrics.host.aggregate.splitpoints",
-            "timeline.metrics.cluster.aggregate.splitpoints");
-          updateConfigurationPropertiesForCluster(cluster, "ams-site", newProperties, removeProperties, true, true);
-
+          updateConfigurationPropertiesForCluster(cluster, "ams-site", Collections.singletonMap("timeline.metrics.service.default.result.limit", "5760"), true, true);
         }
       }
     }
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
index e7eb3cb..0ffbf8a 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
@@ -541,6 +541,60 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>timeline.metrics.host.aggregate.splitpoints</name>
+    <value> </value>
+    <description>
+      Pre-split regions using the split points corresponding to this property
+      for the precision table that stores seconds aggregate data.
+    </description>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.regionserver.global.memstore.upperLimit</name>
+      </property>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.hregion.memstore.flush.size</name>
+      </property>
+      <property>
+        <type>ams-hbase-env</type>
+        <name>hbase_master_heapsize</name>
+      </property>
+      <property>
+        <type>ams-hbase-env</type>
+        <name>hbase_regionserver_heapsize</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregate.splitpoints</name>
+    <value> </value>
+    <description>
+      Pre-split regions using the split points corresponding to this property
+      for the aggregate table that stores seconds aggregate data across hosts.
+    </description>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.regionserver.global.memstore.upperLimit</name>
+      </property>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.hregion.memstore.flush.size</name>
+      </property>
+      <property>
+        <type>ams-hbase-env</type>
+        <name>hbase_master_heapsize</name>
+      </property>
+      <property>
+        <type>ams-hbase-env</type>
+        <name>hbase_regionserver_heapsize</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
     <name>timeline.metrics.sink.report.interval</name>
     <value>60</value>
     <description>
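
The two properties restored above feed region pre-splitting: each comma-separated metric name becomes a region boundary when the metrics tables are created, so writes are spread across region servers from the start instead of landing on a single region until HBase splits it organically. A hedged sketch of what that pre-splitting amounts to against the plain HBase client API (AMS itself creates its tables through Phoenix; the table and column-family names here are illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PreSplitSketch {
      public static void main(String[] args) throws Exception {
        String configured = "cpu_user,mem_free";  // shape of the ams-site property value
        String[] parts = configured.split(",");
        byte[][] splitKeys = new byte[parts.length][];
        for (int i = 0; i < parts.length; i++) {
          splitKeys[i] = Bytes.toBytes(parts[i].trim());
        }
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("METRIC_RECORD"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("0"))
              .build();
          admin.createTable(table, splitKeys);  // n split points => n+1 initial regions
        }
      }
    }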
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/service-metrics/KAFKA.txt b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/service-metrics/KAFKA.txt
old mode 100644
new mode 100755
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
index d0ee66e..73341f1 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
@@ -226,35 +226,9 @@ def ams(name=None, action=None):
               recursive_ownership = True
     )
 
-    new_ams_site = {}
-    new_ams_site.update(params.config['configurations']['ams-site'])
-    if params.clusterHostInfoDict:
-      master_components = []
-      slave_components = []
-      components = dict(params.clusterHostInfoDict).keys()
-      known_slave_components = ["nodemanager", "metrics_monitor", "datanode", "hbase_regionserver"]
-      for component in components:
-        if component and component.endswith("_hosts"):
-          component_name = component[:-6]
-        elif component and component.endswith("_host"):
-          component_name = component[:-5]
-        else:
-          continue
-        if component_name in known_slave_components:
-          slave_components.append(component_name)
-        else:
-          master_components.append(component_name)
-
-      if slave_components:
-        new_ams_site['timeline.metrics.initial.configured.slave.components'] = ",".join(slave_components)
-      if master_components:
-        if 'ambari_server' not in master_components:
-          master_components.append('ambari_server')
-        new_ams_site['timeline.metrics.initial.configured.master.components'] = ",".join(master_components)
-
     XmlConfig("ams-site.xml",
               conf_dir=params.ams_collector_conf_dir,
-              configurations=new_ams_site,
+              configurations=params.config['configurations']['ams-site'],
               configuration_attributes=params.config['configurationAttributes']['ams-site'],
               owner=params.ams_user,
               group=params.user_group
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index 9424752..de0fcf1 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -393,7 +393,6 @@ hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 
 
-clusterHostInfoDict = config["clusterHostInfo"]
 
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py
index 7deeae6..c78d48a 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py
@@ -33,6 +33,15 @@ SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
 STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
 PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
 
+#split points
+metricsDir = os.path.join(SCRIPT_DIR, 'package')
+print "METRICS_DIR=>" + str(metricsDir)
+serviceMetricsDir = os.path.join(metricsDir, 'files', 'service-metrics')
+customServiceMetricsDir = os.path.join(SCRIPT_DIR, '../../../dashboards/service-metrics')
+sys.path.append(os.path.join(metricsDir, 'scripts'))
+
+from split_points import FindSplitPointsForAMSRegions
+
 try:
   with open(PARENT_FILE, 'rb') as fp:
     service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
@@ -430,6 +439,19 @@ class AMBARI_METRICSRecommender(service_advisor.ServiceAdvisor):
     if not ams_hbase_env:
       ams_hbase_env = configurations["ams-hbase-env"]["properties"]
 
+    split_point_finder = FindSplitPointsForAMSRegions(
+      ams_hbase_site, ams_hbase_env, serviceMetricsDir, customServiceMetricsDir, operatingMode, servicesList)
+
+    result = split_point_finder.get_split_points()
+    precision_splits = ' '
+    aggregate_splits = ' '
+    if result.precision:
+      precision_splits = result.precision
+    if result.aggregate:
+      aggregate_splits = result.aggregate
+    putAmsSiteProperty("timeline.metrics.host.aggregate.splitpoints", ','.join(precision_splits))
+    putAmsSiteProperty("timeline.metrics.cluster.aggregate.splitpoints", ','.join(aggregate_splits))
+
     component_grafana_exists = False
     for service in services['services']:
       if 'components' in service:
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog270Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog270Test.java
index ad6d435..2268694 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog270Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog270Test.java
@@ -1340,15 +1340,11 @@ public class UpgradeCatalog270Test {
     Map<String, String> oldProperties = new HashMap<String, String>() {
       {
         put("timeline.metrics.service.default.result.limit", "15840");
-        put("timeline.container-metrics.ttl", "2592000");
-        put("timeline.metrics.cluster.aggregate.splitpoints", "cpu_user,mem_free");
-        put("timeline.metrics.host.aggregate.splitpoints", "kafka.metric,nimbus.metric");
       }
     };
     Map<String, String> newProperties = new HashMap<String, String>() {
       {
         put("timeline.metrics.service.default.result.limit", "5760");
-        put("timeline.container-metrics.ttl", "1209600");
       }
     };
 
diff --git a/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py b/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py
index ef2ac04..a97866b 100644
--- a/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py
+++ b/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py
@@ -154,6 +154,8 @@ class TestAMBARI_METRICS010ServiceAdvisor(TestCase):
                                                                                          'hbase.unsafe.stream.capability.enforce': 'true'}},
                   'ams-site': {'properties': {'timeline.metrics.cache.commit.interval': '10',
                                                                              'timeline.metrics.cache.size': '100',
+                                                                             'timeline.metrics.cluster.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
+                                                                             'timeline.metrics.host.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
                                                                              'timeline.metrics.service.handler.thread.count': '20',
                                                                              'timeline.metrics.service.operation.mode': 'distributed',
                                                                              'timeline.metrics.service.watcher.disabled': 'true',
@@ -219,6 +221,8 @@ class TestAMBARI_METRICS010ServiceAdvisor(TestCase):
                                                                                          'hbase.unsafe.stream.capability.enforce': 'true'}},
                 'ams-site': {'properties': {'timeline.metrics.cache.commit.interval': '10',
                                                                              'timeline.metrics.cache.size': '100',
+                                                                             'timeline.metrics.cluster.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
+                                                                             'timeline.metrics.host.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
                                                                              'timeline.metrics.service.handler.thread.count': '20',
                                                                              'timeline.metrics.service.operation.mode': 'distributed',
                                                                              'timeline.metrics.service.watcher.disabled': 'true',
diff --git a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
index fc4d79d..58418f9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
@@ -147,7 +147,7 @@ class TestMetricsCollector(RMFTestCase):
                               group = 'hadoop',
                               conf_dir = '/etc/ambari-metrics-collector/conf',
                               configurations = self.getConfig()['configurations']['ams-site'],
-                              configuration_attributes = self.getConfig()['configurationAttributes']['ams-site']
+                              configuration_attributes = self.getConfig()['configurationAttributes']['ams-hbase-site']
     )
 
     self.assertResourceCalled('XmlConfig', 'ssl-server.xml',
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 69bd8d8..220cc72 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -1076,9 +1076,7 @@
             "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier": "1",
             "timeline.metrics.daily.aggregator.minute.interval": "86400",
             "timeline.metrics.cluster.aggregator.minute.interval": "120",
-            "timeline.metrics.host.aggregator.hourly.interval": "3600",
-            "timeline.metrics.initial.configured.master.components": "drpc_server,hive_server,resourcemanager,all,webhcat_server,snamenode,storm_ui_server,falcon_server,namenode,ganglia_server,logviewer_server,hive_metastore,nimbus,zookeeper_server,historyserver,hbase_master,oozie_server,metrics_collector,supervisor,ganglia_monitor,hive_mysql,ambari_server",
-            "timeline.metrics.initial.configured.slave.components": "hbase_regionserver,datanode,nodemanager"
+            "timeline.metrics.host.aggregator.hourly.interval": "3600"
         },
         "ams-grafana-env": {
             "metrics_grafana_log_dir": "/var/log/ambari-metrics-grafana",
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
index 92cd24a..688c1c7 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
@@ -994,8 +994,7 @@
             "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier": "1",
             "timeline.metrics.daily.aggregator.minute.interval": "86400",
             "timeline.metrics.cluster.aggregator.minute.interval": "120",
-            "timeline.metrics.host.aggregator.hourly.interval": "3600",
-            "timeline.metrics.initial.configured.master.components": "snamenode,nm,drpc_server,hive_server,resourcemanager,all,slave,webhcat_server,ganglia_server,storm_ui_server,falcon_server,hs,hive_metastore,logviewer_server,nimbus,zookeeper_server,hbase_rs,namenode,hbase_master,metrics_collector,ambari_server,supervisor,ganglia_monitor,hive_mysql"
+            "timeline.metrics.host.aggregator.hourly.interval": "3600"
         }
     },
     "configurationAttributes": {

-- 
To stop receiving notification emails like this one, please contact
avijayan@apache.org.