You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by av...@apache.org on 2018/05/10 21:12:36 UTC

[ambari] branch trunk updated (8419a1a -> d4d2767)

This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git.


    from 8419a1a  AMBARI-21816. test_kms_server timing issue (#1235)
     new 8bb3bca  AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work.
     new 5bae5c6  AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work (Pass hbase total heapsize to collector).
     new d4d2767  AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work (Refactor Split point computation. Allow only Murmur3Hash).

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../core/timeline/HBaseTimelineMetricsService.java |  15 +-
 .../core/timeline/PhoenixHBaseAccessor.java        | 227 ++++++++-----------
 .../core/timeline/TimelineMetricConfiguration.java |  47 +---
 .../timeline/TimelineMetricSplitPointComputer.java | 240 +++++++++++++++++++++
 .../core/timeline/TimelineMetricStoreWatcher.java  |   6 +-
 .../discovery/TimelineMetricMetadataManager.java   | 120 ++++++++---
 .../core/timeline/query/PhoenixTransactSQL.java    |  20 +-
 ...rategy.java => Murmur3HashUuidGenStrategy.java} |  38 ++--
 .../core/timeline/uuid/TimelineMetricUuid.java     |  45 ++--
 .../ambari/metrics/webapp/TimelineWebServices.java |  16 +-
 .../{AMSSMOKETESTFAKE.DAT => AMSSMOKETESTFAKE.dat} |   0
 .../{MASTER_HBASE.dat => HBASE_MASTER.dat}         |   0
 .../{SLAVE_HBASE.dat => HBASE_REGIONSERVER.dat}    |   0
 ...TCHER.DAT => TIMELINE_METRIC_STORE_WATCHER.dat} |   0
 .../timeline/AbstractMiniHBaseClusterTest.java     |   9 +-
 .../core/timeline/ITPhoenixHBaseAccessor.java      |  39 ++--
 .../core/timeline/PhoenixHBaseAccessorTest.java    |   4 +-
 .../TimelineMetricSplitPointComputerTest.java      | 141 ++++++++++++
 .../timeline/TimelineMetricStoreWatcherTest.java   |   8 +-
 .../timeline/discovery/TestMetadataManager.java    |   2 +-
 ...gerTest.java => MetricUuidGenStrategyTest.java} | 132 ++++++------
 .../ambari/server/upgrade/UpgradeCatalog270.java   |  19 +-
 .../0.1.0/configuration/ams-site.xml               |  54 -----
 .../0.1.0/package/files/service-metrics/KAFKA.txt  |   0
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py    |  32 ++-
 .../AMBARI_METRICS/0.1.0/package/scripts/params.py |   1 +
 .../AMBARI_METRICS/0.1.0/service_advisor.py        |  22 --
 .../server/upgrade/UpgradeCatalog270Test.java      |   4 +
 .../AMBARI_METRICS/test_service_advisor.py         |   4 -
 .../2.0.6/AMBARI_METRICS/test_metrics_collector.py |   2 +-
 .../test/python/stacks/2.0.6/configs/default.json  |   5 +-
 .../stacks/2.0.6/configs/default_ams_embedded.json |   4 +-
 32 files changed, 805 insertions(+), 451 deletions(-)
 create mode 100644 ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java
 copy ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/{MD5UuidGenStrategy.java => Murmur3HashUuidGenStrategy.java} (68%)
 copy ambari-server/src/main/java/org/apache/ambari/server/state/AgentVersion.java => ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuid.java (60%)
 copy ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/{AMSSMOKETESTFAKE.DAT => AMSSMOKETESTFAKE.dat} (100%)
 rename ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/{MASTER_HBASE.dat => HBASE_MASTER.dat} (100%)
 rename ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/{SLAVE_HBASE.dat => HBASE_REGIONSERVER.dat} (100%)
 copy ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/{TIMELINE_METRIC_STORE_WATCHER.DAT => TIMELINE_METRIC_STORE_WATCHER.dat} (100%)
 create mode 100644 ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java
 rename ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/{TimelineMetricUuidManagerTest.java => MetricUuidGenStrategyTest.java} (59%)
 mode change 100755 => 100644 ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/service-metrics/KAFKA.txt

-- 
To stop receiving notification emails like this one, please contact
avijayan@apache.org.

[ambari] 01/03: AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work.

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 8bb3bcab2bbb2a582ed0db25c86881a9e07803b7
Author: Aravindan Vijayan <av...@hortonworks.com>
AuthorDate: Wed May 9 18:56:07 2018 -0700

    AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work.
---
 .../core/timeline/HBaseTimelineMetricsService.java |  15 +-
 .../core/timeline/PhoenixHBaseAccessor.java        | 226 ++++++++-----------
 .../core/timeline/TimelineMetricConfiguration.java |  47 +---
 .../timeline/TimelineMetricSplitPointComputer.java | 239 +++++++++++++++++++++
 .../core/timeline/TimelineMetricStoreWatcher.java  |   6 +-
 .../discovery/TimelineMetricMetadataManager.java   | 130 ++++++++---
 .../core/timeline/query/PhoenixTransactSQL.java    |  20 +-
 .../timeline/uuid/Murmur3HashUuidGenStrategy.java  |  43 ++++
 .../core/timeline/uuid/TimelineMetricUuid.java     |  55 +++++
 .../ambari/metrics/webapp/TimelineWebServices.java |  16 +-
 .../resources/metrics_def/AMSSMOKETESTFAKE.dat     |   1 +
 .../{MASTER_HBASE.dat => HBASE_MASTER.dat}         |   0
 .../{SLAVE_HBASE.dat => HBASE_REGIONSERVER.dat}    |   0
 .../metrics_def/TIMELINE_METRIC_STORE_WATCHER.dat  |   1 +
 .../timeline/AbstractMiniHBaseClusterTest.java     |   9 +-
 .../core/timeline/ITPhoenixHBaseAccessor.java      |  39 ++--
 .../core/timeline/PhoenixHBaseAccessorTest.java    |   4 +-
 .../TimelineMetricSplitPointComputerTest.java      | 138 ++++++++++++
 .../timeline/TimelineMetricStoreWatcherTest.java   |   8 +-
 .../timeline/discovery/TestMetadataManager.java    |   2 +-
 ...gerTest.java => MetricUuidGenStrategyTest.java} | 132 ++++++------
 .../ambari/server/upgrade/UpgradeCatalog270.java   |  19 +-
 .../0.1.0/configuration/ams-site.xml               |  54 -----
 .../0.1.0/package/files/service-metrics/KAFKA.txt  |   0
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py    |  28 ++-
 .../AMBARI_METRICS/0.1.0/package/scripts/params.py |   1 +
 .../AMBARI_METRICS/0.1.0/service_advisor.py        |  22 --
 .../server/upgrade/UpgradeCatalog270Test.java      |   4 +
 .../AMBARI_METRICS/test_service_advisor.py         |   4 -
 .../2.0.6/AMBARI_METRICS/test_metrics_collector.py |   2 +-
 .../test/python/stacks/2.0.6/configs/default.json  |   4 +-
 .../stacks/2.0.6/configs/default_ams_embedded.json |   3 +-
 32 files changed, 870 insertions(+), 402 deletions(-)

diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/HBaseTimelineMetricsService.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/HBaseTimelineMetricsService.java
index 56a28dc..d09d4bb 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/HBaseTimelineMetricsService.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/HBaseTimelineMetricsService.java
@@ -17,7 +17,6 @@
  */
 package org.apache.ambari.metrics.core.timeline;
 
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.DEFAULT_TOPN_HOSTS_LIMIT;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.USE_GROUPBY_AGGREGATOR_QUERIES;
 import static org.apache.ambari.metrics.core.timeline.availability.AggregationTaskRunner.ACTUAL_AGGREGATOR_NAMES;
 
@@ -35,7 +34,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ThreadFactory;
@@ -111,15 +109,18 @@ public class HBaseTimelineMetricsService extends AbstractService implements Time
   private synchronized void initializeSubsystem() {
     if (!isInitialized) {
       hBaseAccessor = new PhoenixHBaseAccessor(null);
-      // Initialize schema
-      hBaseAccessor.initMetricSchema();
-      // Initialize metadata from store
+
+      // Initialize metadata
       try {
         metricMetadataManager = new TimelineMetricMetadataManager(hBaseAccessor);
       } catch (MalformedURLException | URISyntaxException e) {
         throw new ExceptionInInitializerError("Unable to initialize metadata manager");
       }
       metricMetadataManager.initializeMetadata();
+
+      // Initialize metric schema
+      hBaseAccessor.initMetricSchema();
+
       // Initialize policies before TTL update
       hBaseAccessor.initPoliciesAndTTL();
       // Start HA service
@@ -395,6 +396,10 @@ public class HBaseTimelineMetricsService extends AbstractService implements Time
     return metricsFunctions;
   }
 
+  public void putMetricsSkipCache(TimelineMetrics metrics) throws SQLException, IOException {
+    hBaseAccessor.insertMetricRecordsWithMetadata(metricMetadataManager, metrics, true);
+  }
+
   @Override
   public TimelinePutResponse putMetrics(TimelineMetrics metrics) throws SQLException, IOException {
     // Error indicated by the Sql exception
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java
index 040df1b..dec7850 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java
@@ -20,6 +20,7 @@ package org.apache.ambari.metrics.core.timeline;
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.apache.ambari.metrics.core.timeline.FunctionUtils.findMetricFunctions;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.AGGREGATORS_SKIP_BLOCK_CACHE;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.BLOCKING_STORE_FILES_KEY;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_MINUTE_SLEEP_INTERVAL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_TIMESLICE_INTERVAL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.CLUSTER_DAILY_TABLE_TTL;
@@ -27,41 +28,32 @@ import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguratio
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.CLUSTER_MINUTE_TABLE_TTL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.CLUSTER_SECOND_TABLE_TTL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.CONTAINER_METRICS_TTL;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.DATE_TIERED_COMPACTION_POLICY;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.FIFO_COMPACTION_POLICY_CLASS;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.GLOBAL_MAX_RETRIES;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.GLOBAL_RESULT_LIMIT;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.GLOBAL_RETRY_INTERVAL;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HBASE_BLOCKING_STORE_FILES;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HBASE_COMPRESSION_SCHEME;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HBASE_ENCODING_SCHEME;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HOST_AGGREGATOR_MINUTE_SLEEP_INTERVAL;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HSTORE_COMPACTION_CLASS_KEY;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HSTORE_ENGINE_CLASS;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TRANSIENT_METRIC_PATTERNS;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HOST_DAILY_TABLE_TTL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HOST_HOUR_TABLE_TTL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HOST_MINUTE_TABLE_TTL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.METRICS_TRANSIENT_TABLE_TTL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.PRECISION_TABLE_TTL;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_AGGREGATE_TABLES_DURABILITY;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_AGGREGATE_TABLE_HBASE_BLOCKING_STORE_FILES;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_CACHE_COMMIT_INTERVAL;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_CACHE_ENABLED;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_CACHE_SIZE;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_CLASS;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_KEY;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_CLASS;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_KEY;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_PRECISION_TABLE_DURABILITY;
-import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_PRECISION_TABLE_HBASE_BLOCKING_STORE_FILES;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRIC_AGGREGATOR_SINK_CLASS;
-import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.ALTER_METRICS_METADATA_TABLE;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CONTAINER_METRICS_TABLE_NAME;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_CONTAINER_METRICS_TABLE_SQL;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_TRANSIENT_METRICS_TABLE_SQL;
-import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_HOSTED_APPS_METADATA_TABLE_SQL;
-import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_INSTANCE_HOST_TABLE_SQL;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_METRICS_AGGREGATE_TABLE_SQL;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_GROUPED_TABLE_SQL;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_TABLE_SQL;
-import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_METRICS_METADATA_TABLE_SQL;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_METRICS_TABLE_SQL;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.DEFAULT_ENCODING;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.DEFAULT_TABLE_COMPRESSION;
@@ -132,6 +124,7 @@ import org.apache.ambari.metrics.core.timeline.sink.ExternalSinkProvider;
 import org.apache.ambari.metrics.core.timeline.source.InternalMetricsSource;
 import org.apache.ambari.metrics.core.timeline.source.InternalSourceProvider;
 import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -201,21 +194,8 @@ public class PhoenixHBaseAccessor {
   private TimelineMetricsAggregatorSink aggregatorSink;
   private final int cacheCommitInterval;
   private final boolean skipBlockCacheForAggregatorsEnabled;
-  private final String timelineMetricsTablesDurability;
-  private final String timelineMetricsPrecisionTableDurability;
   private TimelineMetricMetadataManager metadataManagerInstance;
 
-  static final String HSTORE_COMPACTION_CLASS_KEY =
-    "hbase.hstore.defaultengine.compactionpolicy.class";
-  static final String HSTORE_ENGINE_CLASS =
-    "hbase.hstore.engine.class";
-  static final String FIFO_COMPACTION_POLICY_CLASS =
-    "org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy";
-  static final String DATE_TIERED_COMPACTION_POLICY =
-    "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine";
-  static final String BLOCKING_STORE_FILES_KEY =
-    "hbase.hstore.blockingStoreFiles";
-
   private Map<String, Integer> tableTTL = new HashMap<>();
 
   private final TimelineMetricConfiguration configuration;
@@ -259,11 +239,9 @@ public class PhoenixHBaseAccessor {
     this.cacheCommitInterval = Integer.valueOf(metricsConf.get(TIMELINE_METRICS_CACHE_COMMIT_INTERVAL, "3"));
     this.insertCache = new ArrayBlockingQueue<TimelineMetrics>(cacheSize);
     this.skipBlockCacheForAggregatorsEnabled = metricsConf.getBoolean(AGGREGATORS_SKIP_BLOCK_CACHE, false);
-    this.timelineMetricsTablesDurability = metricsConf.get(TIMELINE_METRICS_AGGREGATE_TABLES_DURABILITY, "");
-    this.timelineMetricsPrecisionTableDurability = metricsConf.get(TIMELINE_METRICS_PRECISION_TABLE_DURABILITY, "");
 
     tableTTL.put(METRICS_RECORD_TABLE_NAME, metricsConf.getInt(PRECISION_TABLE_TTL, 1 * 86400));  // 1 day
-    tableTTL.put(CONTAINER_METRICS_TABLE_NAME, metricsConf.getInt(CONTAINER_METRICS_TTL, 30 * 86400));  // 30 days
+    tableTTL.put(CONTAINER_METRICS_TABLE_NAME, metricsConf.getInt(CONTAINER_METRICS_TTL, 14 * 86400));  // 14 days
     tableTTL.put(METRICS_AGGREGATE_MINUTE_TABLE_NAME, metricsConf.getInt(HOST_MINUTE_TABLE_TTL, 7 * 86400)); //7 days
     tableTTL.put(METRICS_AGGREGATE_HOURLY_TABLE_NAME, metricsConf.getInt(HOST_HOUR_TABLE_TTL, 30 * 86400)); //30 days
     tableTTL.put(METRICS_AGGREGATE_DAILY_TABLE_NAME, metricsConf.getInt(HOST_DAILY_TABLE_TTL, 365 * 86400)); //1 year
@@ -470,7 +448,7 @@ public class PhoenixHBaseAccessor {
     return mapper.readValue(json, metricValuesTypeRef);
   }
 
-  private Connection getConnectionRetryingOnException() throws SQLException, InterruptedException {
+  public Connection getConnectionRetryingOnException() throws SQLException, InterruptedException {
     RetryCounter retryCounter = retryCounterFactory.create();
     while (true) {
       try{
@@ -511,6 +489,9 @@ public class PhoenixHBaseAccessor {
   protected void initMetricSchema() {
     Connection conn = null;
     Statement stmt = null;
+    PreparedStatement pStmt = null;
+    TimelineMetricSplitPointComputer splitPointComputer = new TimelineMetricSplitPointComputer(
+      metricsConf, hbaseConf, metadataManagerInstance);
 
     String encoding = metricsConf.get(HBASE_ENCODING_SCHEME, DEFAULT_ENCODING);
     String compression = metricsConf.get(HBASE_COMPRESSION_SCHEME, DEFAULT_TABLE_COMPRESSION);
@@ -521,21 +502,6 @@ public class PhoenixHBaseAccessor {
       conn = getConnectionRetryingOnException();
       stmt = conn.createStatement();
 
-      // Metadata
-      String metadataSql = String.format(CREATE_METRICS_METADATA_TABLE_SQL,
-        encoding, compression);
-      stmt.executeUpdate(metadataSql);
-      stmt.executeUpdate(ALTER_METRICS_METADATA_TABLE);
-
-      String hostedAppSql = String.format(CREATE_HOSTED_APPS_METADATA_TABLE_SQL,
-        encoding, compression);
-      stmt.executeUpdate(hostedAppSql);
-
-      //Host Instances table
-      String hostedInstancesSql = String.format(CREATE_INSTANCE_HOST_TABLE_SQL,
-        encoding, compression);
-      stmt.executeUpdate(hostedInstancesSql);
-
       // Container Metrics
       stmt.executeUpdate( String.format(CREATE_CONTAINER_METRICS_TABLE_SQL,
         encoding, tableTTL.get(CONTAINER_METRICS_TABLE_NAME), compression));
@@ -543,13 +509,15 @@ public class PhoenixHBaseAccessor {
       // Host level
       String precisionSql = String.format(CREATE_METRICS_TABLE_SQL,
         encoding, tableTTL.get(METRICS_RECORD_TABLE_NAME), compression);
-      stmt.executeUpdate(precisionSql);
+      pStmt = prepareCreateMetricsTableStatement(conn, precisionSql, splitPointComputer.getPrecisionSplitPoints());
+      pStmt.executeUpdate();
 
       String hostMinuteAggregrateSql = String.format(CREATE_METRICS_AGGREGATE_TABLE_SQL,
         METRICS_AGGREGATE_MINUTE_TABLE_NAME, encoding,
         tableTTL.get(METRICS_AGGREGATE_MINUTE_TABLE_NAME),
         compression);
-      stmt.executeUpdate(hostMinuteAggregrateSql);
+      pStmt = prepareCreateMetricsTableStatement(conn, hostMinuteAggregrateSql, splitPointComputer.getHostAggregateSplitPoints());
+      pStmt.executeUpdate();
 
       stmt.executeUpdate(String.format(CREATE_METRICS_AGGREGATE_TABLE_SQL,
         METRICS_AGGREGATE_HOURLY_TABLE_NAME, encoding,
@@ -565,8 +533,9 @@ public class PhoenixHBaseAccessor {
         METRICS_CLUSTER_AGGREGATE_TABLE_NAME, encoding,
         tableTTL.get(METRICS_CLUSTER_AGGREGATE_TABLE_NAME),
         compression);
+      pStmt = prepareCreateMetricsTableStatement(conn, aggregateSql, splitPointComputer.getClusterAggregateSplitPoints());
+      pStmt.executeUpdate();
 
-      stmt.executeUpdate(aggregateSql);
       stmt.executeUpdate(String.format(CREATE_METRICS_CLUSTER_AGGREGATE_GROUPED_TABLE_SQL,
         METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME, encoding,
         tableTTL.get(METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME),
@@ -603,6 +572,13 @@ public class PhoenixHBaseAccessor {
           // Ignore
         }
       }
+      if (pStmt != null) {
+        try {
+          pStmt.close();
+        } catch (Exception e) {
+          // Ignore
+        }
+      }
       if (conn != null) {
         try {
           conn.close();
@@ -613,7 +589,7 @@ public class PhoenixHBaseAccessor {
     }
   }
 
-  protected void initPoliciesAndTTL() {
+  void initPoliciesAndTTL() {
     Admin hBaseAdmin = null;
     try {
       hBaseAdmin = dataSource.getHBaseAdmin();
@@ -622,9 +598,13 @@ public class PhoenixHBaseAccessor {
     }
 
     TableName[] tableNames = null;
+    TableName[] containerMetricsTableName = null;
+
     if (hBaseAdmin != null) {
       try {
         tableNames = hBaseAdmin.listTableNames(PHOENIX_TABLES_REGEX_PATTERN, false);
+        containerMetricsTableName = hBaseAdmin.listTableNames(CONTAINER_METRICS_TABLE_NAME, false);
+        tableNames = (TableName[]) ArrayUtils.addAll(tableNames, containerMetricsTableName);
       } catch (IOException e) {
         LOG.warn("Unable to get table names from HBaseAdmin for setting policies.", e);
         return;
@@ -708,72 +688,44 @@ public class PhoenixHBaseAccessor {
   }
 
   private boolean setDurabilityForTable(String tableName, TableDescriptorBuilder tableDescriptor) {
-
-    boolean modifyTable = false;
-
-    if (METRIC_TRANSIENT_TABLE_NAME.equalsIgnoreCase(tableName)) {
-      tableDescriptor.setDurability(Durability.SKIP_WAL);
-      modifyTable = true;
-    } else if (METRICS_RECORD_TABLE_NAME.equals(tableName)) {
-      if (!timelineMetricsPrecisionTableDurability.isEmpty()) {
-        LOG.info("Setting WAL option " + timelineMetricsPrecisionTableDurability + " for table : " + tableName);
-        boolean validDurability = true;
-        if ("SKIP_WAL".equals(timelineMetricsPrecisionTableDurability)) {
-          tableDescriptor.setDurability(Durability.SKIP_WAL);
-        } else if ("SYNC_WAL".equals(timelineMetricsPrecisionTableDurability)) {
-          tableDescriptor.setDurability(Durability.SYNC_WAL);
-        } else if ("ASYNC_WAL".equals(timelineMetricsPrecisionTableDurability)) {
-          tableDescriptor.setDurability(Durability.ASYNC_WAL);
-        } else if ("FSYNC_WAL".equals(timelineMetricsPrecisionTableDurability)) {
-          tableDescriptor.setDurability(Durability.FSYNC_WAL);
-        } else {
-          LOG.info("Unknown value for " + TIMELINE_METRICS_PRECISION_TABLE_DURABILITY + " : " + timelineMetricsPrecisionTableDurability);
-          validDurability = false;
-        }
-        if (validDurability) {
-          modifyTable = true;
-        }
-      }
-    } else {
-      if (!timelineMetricsTablesDurability.isEmpty()) {
-        LOG.info("Setting WAL option " + timelineMetricsTablesDurability + " for table : " + tableName);
-        boolean validDurability = true;
-        if ("SKIP_WAL".equals(timelineMetricsTablesDurability)) {
-          tableDescriptor.setDurability(Durability.SKIP_WAL);
-        } else if ("SYNC_WAL".equals(timelineMetricsTablesDurability)) {
-          tableDescriptor.setDurability(Durability.SYNC_WAL);
-        } else if ("ASYNC_WAL".equals(timelineMetricsTablesDurability)) {
-          tableDescriptor.setDurability(Durability.ASYNC_WAL);
-        } else if ("FSYNC_WAL".equals(timelineMetricsTablesDurability)) {
-          tableDescriptor.setDurability(Durability.FSYNC_WAL);
-        } else {
-          LOG.info("Unknown value for " + TIMELINE_METRICS_AGGREGATE_TABLES_DURABILITY + " : " + timelineMetricsTablesDurability);
-          validDurability = false;
-        }
-        if (validDurability) {
-          modifyTable = true;
-        }
+    String tableDurability = metricsConf.get("timeline.metrics." + tableName + ".durability", "");
+    if (StringUtils.isNotEmpty(tableDurability)) {
+      LOG.info("Setting WAL option " + tableDurability + " for table : " + tableName);
+      boolean validDurability = true;
+      if ("SKIP_WAL".equals(tableDurability)) {
+        tableDescriptor.setDurability(Durability.SKIP_WAL);
+      } else if ("SYNC_WAL".equals(tableDurability)) {
+        tableDescriptor.setDurability(Durability.SYNC_WAL);
+      } else if ("ASYNC_WAL".equals(tableDurability)) {
+        tableDescriptor.setDurability(Durability.ASYNC_WAL);
+      } else if ("FSYNC_WAL".equals(tableDurability)) {
+        tableDescriptor.setDurability(Durability.FSYNC_WAL);
+      } else {
+        LOG.info("Unknown value for durability : " + tableDurability);
+        validDurability = false;
       }
+      return validDurability;
     }
-    return modifyTable;
+    return false;
   }
 
+
   private boolean setCompactionPolicyForTable(String tableName, TableDescriptorBuilder tableDescriptorBuilder) {
 
     boolean modifyTable = false;
 
-    String compactionPolicyKey = metricsConf.get(TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_KEY,
-      HSTORE_ENGINE_CLASS);
-    String compactionPolicyClass = metricsConf.get(TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_CLASS,
-      DATE_TIERED_COMPACTION_POLICY);
-    int blockingStoreFiles = hbaseConf.getInt(TIMELINE_METRICS_AGGREGATE_TABLE_HBASE_BLOCKING_STORE_FILES, 60);
-
-    if (tableName.equals(METRICS_RECORD_TABLE_NAME) || tableName.equalsIgnoreCase(METRIC_TRANSIENT_TABLE_NAME)) {
-      compactionPolicyKey = metricsConf.get(TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_KEY,
-        HSTORE_COMPACTION_CLASS_KEY);
-      compactionPolicyClass = metricsConf.get(TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_CLASS,
-        FIFO_COMPACTION_POLICY_CLASS);
-      blockingStoreFiles = hbaseConf.getInt(TIMELINE_METRICS_PRECISION_TABLE_HBASE_BLOCKING_STORE_FILES, 1000);
+    String keyConfig = "timeline.metrics." + tableName + ".compaction.policy.key";
+    String policyConfig = "timeline.metrics." + tableName + ".compaction.policy";
+    String storeFilesConfig = "timeline.metrics." + tableName + ".blocking.store.files";
+
+    String compactionPolicyKey = metricsConf.get(keyConfig, HSTORE_ENGINE_CLASS);
+    String compactionPolicyClass = metricsConf.get(policyConfig, DATE_TIERED_COMPACTION_POLICY);
+    int blockingStoreFiles = hbaseConf.getInt(storeFilesConfig, 60);
+
+    if (tableName.equals(METRICS_RECORD_TABLE_NAME)) {
+      compactionPolicyKey = metricsConf.get(keyConfig, HSTORE_COMPACTION_CLASS_KEY);
+      compactionPolicyClass = metricsConf.get(policyConfig, FIFO_COMPACTION_POLICY_CLASS);
+      blockingStoreFiles = hbaseConf.getInt(storeFilesConfig, 1000);
     }
 
     if (StringUtils.isEmpty(compactionPolicyKey) || StringUtils.isEmpty(compactionPolicyClass)) {
@@ -781,46 +733,54 @@ public class PhoenixHBaseAccessor {
       modifyTable = setHbaseBlockingStoreFiles(tableDescriptorBuilder, tableName, 300);
     } else {
       tableDescriptorBuilder.setValue(compactionPolicyKey, compactionPolicyClass);
-      tableDescriptorBuilder.removeValue(HSTORE_ENGINE_CLASS.getBytes());
-      tableDescriptorBuilder.removeValue(HSTORE_COMPACTION_CLASS_KEY.getBytes());
       setHbaseBlockingStoreFiles(tableDescriptorBuilder, tableName, blockingStoreFiles);
       modifyTable = true;
     }
 
+    if (!compactionPolicyKey.equals(HSTORE_ENGINE_CLASS)) {
+      tableDescriptorBuilder.removeValue(HSTORE_ENGINE_CLASS.getBytes());
+    }
+    if (!compactionPolicyKey.equals(HSTORE_COMPACTION_CLASS_KEY)) {
+      tableDescriptorBuilder.removeValue(HSTORE_COMPACTION_CLASS_KEY.getBytes());
+    }
+
     return modifyTable;
   }
 
   private boolean setHbaseBlockingStoreFiles(TableDescriptorBuilder tableDescriptor, String tableName, int value) {
-    int blockingStoreFiles = hbaseConf.getInt(HBASE_BLOCKING_STORE_FILES, value);
-    if (blockingStoreFiles != value) {
-      blockingStoreFiles = value;
-      tableDescriptor.setValue(BLOCKING_STORE_FILES_KEY, String.valueOf(value));
-      LOG.info("Setting config property " + BLOCKING_STORE_FILES_KEY +
-        " = " + blockingStoreFiles + " for " + tableName);
-      return true;
+    tableDescriptor.setValue(BLOCKING_STORE_FILES_KEY, String.valueOf(value));
+    LOG.info("Setting config property " + BLOCKING_STORE_FILES_KEY +
+      " = " + value + " for " + tableName);
+    return true;
+  }
+
+
+  private PreparedStatement prepareCreateMetricsTableStatement(Connection connection,
+                                                               String sql,
+                                                               List<byte[]> splitPoints) throws SQLException {
+
+    String createTableWithSplitPointsSql = sql + getSplitPointsStr(splitPoints.size());
+    LOG.info(createTableWithSplitPointsSql);
+    PreparedStatement statement = connection.prepareStatement(createTableWithSplitPointsSql);
+    for (int i = 1; i <= splitPoints.size(); i++) {
+      statement.setBytes(i, splitPoints.get(i - 1));
     }
-    return false;
+    return statement;
   }
 
-  protected String getSplitPointsStr(String splitPoints) {
-    if (StringUtils.isEmpty(splitPoints.trim())) {
+  private String getSplitPointsStr(int numSplits) {
+    if (numSplits <= 0) {
       return "";
     }
-    String[] points = splitPoints.split(",");
-    if (points.length > 0) {
-      StringBuilder sb = new StringBuilder(" SPLIT ON ");
-      sb.append("(");
-      for (String point : points) {
-        sb.append("'");
-        sb.append(point.trim());
-        sb.append("'");
-        sb.append(",");
-      }
-      sb.deleteCharAt(sb.length() - 1);
-      sb.append(")");
-      return sb.toString();
+    StringBuilder sb = new StringBuilder(" SPLIT ON ");
+    sb.append("(");
+    for (int i = 0; i < numSplits; i++) {
+      sb.append("?");
+      sb.append(",");
     }
-    return "";
+    sb.deleteCharAt(sb.length() - 1);
+    sb.append(")");
+    return sb.toString();
   }
 
   /**
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricConfiguration.java
index 6ec2c6b..393d4a3 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricConfiguration.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricConfiguration.java
@@ -237,12 +237,6 @@ public class TimelineMetricConfiguration {
   public static final String WATCHER_MAX_FAILURES =
     "timeline.metrics.service.watcher.max.failures";
 
-  public static final String PRECISION_TABLE_SPLIT_POINTS =
-    "timeline.metrics.host.aggregate.splitpoints";
-
-  public static final String AGGREGATE_TABLE_SPLIT_POINTS =
-    "timeline.metrics.cluster.aggregate.splitpoints";
-
   public static final String AGGREGATORS_SKIP_BLOCK_CACHE =
     "timeline.metrics.aggregators.skip.blockcache.enabled";
 
@@ -261,12 +255,6 @@ public class TimelineMetricConfiguration {
   public static final String TIMELINE_METRICS_SINK_COLLECTION_PERIOD =
     "timeline.metrics.sink.collection.period";
 
-  public static final String TIMELINE_METRICS_PRECISION_TABLE_DURABILITY =
-    "timeline.metrics.precision.table.durability";
-
-  public static final String TIMELINE_METRICS_AGGREGATE_TABLES_DURABILITY =
-      "timeline.metrics.aggregate.tables.durability";
-
   public static final String TIMELINE_METRICS_WHITELIST_ENABLED =
     "timeline.metrics.whitelisting.enabled";
 
@@ -285,33 +273,9 @@ public class TimelineMetricConfiguration {
   public static final String TIMELINE_METRICS_APPS_WHITELIST =
     "timeline.metrics.apps.whitelist";
 
-  public static final String HBASE_BLOCKING_STORE_FILES =
-    "hbase.hstore.blockingStoreFiles";
-
-  public static final String DEFAULT_TOPN_HOSTS_LIMIT =
-    "timeline.metrics.default.topn.hosts.limit";
-
   public static final String TIMELINE_METRIC_AGGREGATION_SQL_FILTERS =
     "timeline.metrics.cluster.aggregation.sql.filters";
 
-  public static final String TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_KEY =
-    "timeline.metrics.hbase.aggregate.table.compaction.policy.key";
-
-  public static final String TIMELINE_METRICS_HBASE_AGGREGATE_TABLE_COMPACTION_POLICY_CLASS =
-    "timeline.metrics.hbase.aggregate.table.compaction.policy.class";
-
-  public static final String TIMELINE_METRICS_AGGREGATE_TABLE_HBASE_BLOCKING_STORE_FILES =
-    "timeline.metrics.aggregate.table.hbase.hstore.blockingStoreFiles";
-
-  public static final String TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_KEY =
-    "timeline.metrics.hbase.precision.table.compaction.policy.key";
-
-  public static final String TIMELINE_METRICS_HBASE_PRECISION_TABLE_COMPACTION_POLICY_CLASS =
-    "timeline.metrics.hbase.precision.table.compaction.policy.class";
-
-  public static final String TIMELINE_METRICS_PRECISION_TABLE_HBASE_BLOCKING_STORE_FILES =
-    "timeline.metrics.precision.table.hbase.hstore.blockingStoreFiles";
-
   public static final String TIMELINE_METRICS_SUPPORT_MULTIPLE_CLUSTERS =
     "timeline.metrics.support.multiple.clusters";
 
@@ -346,6 +310,9 @@ public class TimelineMetricConfiguration {
 
   public static final String TRANSIENT_METRIC_PATTERNS = "timeline.metrics.transient.metric.patterns";
 
+  public static final String TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS = "timeline.metrics.initial.configured.master.components";
+  public static final String TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS = "timeline.metrics.initial.configured.slave.components";
+
   public static final String KAFKA_SERVERS = "timeline.metrics.external.sink.kafka.bootstrap.servers";
   public static final String KAFKA_ACKS = "timeline.metrics.external.sink.kafka.acks";
   public static final String KAFKA_RETRIES = "timeline.metrics.external.sink.kafka.bootstrap.retries";
@@ -353,7 +320,13 @@ public class TimelineMetricConfiguration {
   public static final String KAFKA_LINGER_MS = "timeline.metrics.external.sink.kafka.linger.ms";
   public static final String KAFKA_BUFFER_MEM = "timeline.metrics.external.sink.kafka.buffer.memory";
   public static final String KAFKA_SINK_TIMEOUT_SECONDS = "timeline.metrics.external.sink.kafka.timeout.seconds";
-  
+
+  public static final String HSTORE_COMPACTION_CLASS_KEY = "hbase.hstore.defaultengine.compactionpolicy.class";
+  public static final String HSTORE_ENGINE_CLASS = "hbase.hstore.engine.class";
+  public static final String FIFO_COMPACTION_POLICY_CLASS = "org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy";
+  public static final String DATE_TIERED_COMPACTION_POLICY = "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine";
+  public static final String BLOCKING_STORE_FILES_KEY = "hbase.hstore.blockingStoreFiles";
+
   private Configuration hbaseConf;
   private Configuration metricsConf;
   private Configuration metricsSslConf;
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java
new file mode 100644
index 0000000..89bb843
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java
@@ -0,0 +1,239 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.metrics.core.timeline;
+
+import org.apache.ambari.metrics.core.timeline.aggregators.TimelineClusterMetric;
+import org.apache.ambari.metrics.core.timeline.discovery.TimelineMetricMetadataManager;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+import java.io.BufferedReader;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS;
+
+public class TimelineMetricSplitPointComputer {
+
+  private static final Log LOG = LogFactory.getLog(TimelineMetricSplitPointComputer.class);
+  private Set<String> masterComponents = new HashSet<>();
+  private Set<String> slaveComponents = new HashSet<>();
+
+  private static final int MINIMUM_PRECISION_TABLE_REGIONS = 4;
+  private static final int MINIMUM_AGGREGATE_TABLE_REGIONS = 2;
+  private static final int OTHER_TABLE_STATIC_REGIONS = 8;
+  private static final int SLAVE_EQUIDISTANT_POINTS = 50;
+  private static final int MASTER_EQUIDISTANT_POINTS = 5;
+
+  private List<byte[]> precisionSplitPoints = new ArrayList<>();
+  private List<byte[]> aggregateSplitPoints = new ArrayList<>();
+
+  public TimelineMetricSplitPointComputer(Configuration metricsConf,
+                                          Configuration hbaseConf,
+                                          TimelineMetricMetadataManager timelineMetricMetadataManager) {
+
+    String componentsString = metricsConf.get(TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS, "");
+    if (StringUtils.isNotEmpty(componentsString)) {
+      masterComponents.addAll(Arrays.asList(componentsString.split(",")));
+    }
+
+   componentsString = metricsConf.get(TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS, "");
+    if (StringUtils.isNotEmpty(componentsString)) {
+      slaveComponents.addAll(Arrays.asList(componentsString.split(",")));
+    }
+
+    double hbaseTotalHeapsize = metricsConf.getDouble("hbase_total_heapsize", 1024*1024*1024);
+    double hbaseMemstoreUpperLimit = hbaseConf.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5);
+    double hbaseMemstoreFlushSize = hbaseConf.getDouble("hbase.hregion.memstore.flush.size", 134217728);
+
+    computeSplitPoints(hbaseTotalHeapsize, hbaseMemstoreUpperLimit, hbaseMemstoreFlushSize, timelineMetricMetadataManager);
+  }
+
+
+  private void computeSplitPoints(double hbaseTotalHeapsize,
+                                  double hbaseMemstoreUpperLimit,
+                                  double hbaseMemstoreFlushSize,
+                                  TimelineMetricMetadataManager timelineMetricMetadataManager) {
+
+    double memstoreMaxMemory = hbaseMemstoreUpperLimit * hbaseTotalHeapsize;
+    int maxInMemoryRegions = (int) ((memstoreMaxMemory / hbaseMemstoreFlushSize) - OTHER_TABLE_STATIC_REGIONS);
+
+    int targetPrecisionTableRegionCount = MINIMUM_PRECISION_TABLE_REGIONS;
+    int targetAggregateTableRegionCount = MINIMUM_AGGREGATE_TABLE_REGIONS;
+
+    if (maxInMemoryRegions > 2) {
+      targetPrecisionTableRegionCount =  Math.max(4, (int)(0.70 * maxInMemoryRegions));
+      targetAggregateTableRegionCount =  Math.max(2, (int)(0.15 * maxInMemoryRegions));
+    }
+
+    List<MetricApp> metricList = new ArrayList<>();
+
+    for (String component : masterComponents) {
+      metricList.addAll(getSortedMetricListForSplitPoint(component, false));
+    }
+
+    for (String component : slaveComponents) {
+      metricList.addAll(getSortedMetricListForSplitPoint(component, true));
+    }
+
+    int totalMetricLength = metricList.size();
+
+    if (targetPrecisionTableRegionCount > 1) {
+      int idx = (int) Math.ceil(totalMetricLength / targetPrecisionTableRegionCount);
+      int index = idx;
+      for (int i = 0; i < targetPrecisionTableRegionCount; i++) {
+        if (index < totalMetricLength - 1) {
+          MetricApp metricAppService = metricList.get(index);
+          byte[] uuid = timelineMetricMetadataManager.getUuid(
+            new TimelineClusterMetric(metricAppService.metricName, metricAppService.appId, null, -1),
+            true);
+          precisionSplitPoints.add(uuid);
+          index += idx;
+        }
+      }
+    }
+
+    if (targetAggregateTableRegionCount > 1) {
+      int idx = (int) Math.ceil(totalMetricLength / targetAggregateTableRegionCount);
+      int index = idx;
+      for (int i = 0; i < targetAggregateTableRegionCount; i++) {
+        if (index < totalMetricLength - 1) {
+          MetricApp metricAppService = metricList.get(index);
+          byte[] uuid = timelineMetricMetadataManager.getUuid(
+            new TimelineClusterMetric(metricAppService.metricName, metricAppService.appId, null, -1),
+            true);
+          aggregateSplitPoints.add(uuid);
+          index += idx;
+        }
+      }
+    }
+  }
+
+  private List<MetricApp> getSortedMetricListForSplitPoint(String component, boolean isSlave) {
+
+    String appId = getAppId(component);
+    List<MetricApp> metricList = new ArrayList<>();
+
+    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+    if (classLoader == null) {
+      classLoader = getClass().getClassLoader();
+    }
+
+    String strLine;
+    BufferedReader bufferedReader;
+
+    try (InputStream inputStream = classLoader.getResourceAsStream("metrics_def/" + appId.toUpperCase() + ".dat")) {
+
+      if (inputStream != null) {
+        bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
+        LOG.info("Found split point candidate metrics for : " + appId);
+
+        while ((strLine = bufferedReader.readLine()) != null) {
+          metricList.add(new MetricApp(strLine.trim(), appId));
+        }
+      } else {
+        LOG.info("Split point candidate metrics not found for : " + appId);
+      }
+    } catch (Exception e) {
+      LOG.info("Error reading split point candidate metrics for component : " + component);
+      LOG.error(e);
+    }
+
+    if (isSlave) {
+      return getEquidistantMetrics(metricList, SLAVE_EQUIDISTANT_POINTS);
+    } else {
+      return getEquidistantMetrics(metricList, MASTER_EQUIDISTANT_POINTS);
+    }
+  }
+
+  private List<MetricApp> getEquidistantMetrics(List<MetricApp> metrics, int distance) {
+    List<MetricApp> selectedMetricApps = new ArrayList<>();
+
+    int idx = metrics.size() / distance;
+    if (idx == 0) {
+      return metrics;
+    }
+
+    int index = idx;
+    for (int i = 0; i < distance; i++) {
+      selectedMetricApps.add(metrics.get(index - 1));
+      index += idx;
+    }
+    return selectedMetricApps;
+  }
+
+
+  public List<byte[]> getPrecisionSplitPoints() {
+    return precisionSplitPoints;
+  }
+
+  public List<byte[]> getClusterAggregateSplitPoints() {
+    return aggregateSplitPoints;
+  }
+
+  public List<byte[]> getHostAggregateSplitPoints() {
+    return aggregateSplitPoints;
+  }
+
+  private String getAppId(String component) {
+
+    if (component.equalsIgnoreCase("METRICS_COLLECTOR")) {
+      return "ams-hbase";
+    }
+
+    if (component.equalsIgnoreCase("METRICS_MONITOR")) {
+      return "HOST";
+    }
+    return component;
+  }
+}
+
+class MetricApp implements Comparable{
+  String metricName;
+  String appId;
+
+  MetricApp(String metricName, String appId) {
+    this.metricName = metricName;
+    if (appId.startsWith("hbase")) {
+      this.appId = "hbase";
+    } else {
+      this.appId = appId;
+    }
+  }
+
+  @Override
+  public int compareTo(Object o) {
+    MetricApp that = (MetricApp)o;
+
+    int metricCompare = metricName.compareTo(that.metricName);
+    if (metricCompare != 0) {
+      return metricCompare;
+    }
+
+    return appId.compareTo(that.appId);
+  }
+}
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcher.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcher.java
index 0ab7929..ba7ce44 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcher.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcher.java
@@ -45,13 +45,13 @@ public class TimelineMetricStoreWatcher implements Runnable {
   private static int failures = 0;
   private final TimelineMetricConfiguration configuration;
 
-  private TimelineMetricStore timelineMetricStore;
+  private HBaseTimelineMetricsService timelineMetricStore;
 
   //used to call timelineMetricStore blocking methods with timeout
   private ExecutorService executor = Executors.newSingleThreadExecutor();
 
 
-  public TimelineMetricStoreWatcher(TimelineMetricStore timelineMetricStore,
+  public TimelineMetricStoreWatcher(HBaseTimelineMetricsService timelineMetricStore,
                                     TimelineMetricConfiguration configuration) {
     this.timelineMetricStore = timelineMetricStore;
     this.configuration = configuration;
@@ -100,7 +100,7 @@ public class TimelineMetricStoreWatcher implements Runnable {
 
     Callable<TimelineMetric> task = new Callable<TimelineMetric>() {
       public TimelineMetric call() throws Exception {
-        timelineMetricStore.putMetrics(metrics);
+        timelineMetricStore.putMetricsSkipCache(metrics);
         TimelineMetrics timelineMetrics = timelineMetricStore.getTimelineMetrics(
           Collections.singletonList(FAKE_METRIC_NAME), Collections.singletonList(FAKE_HOSTNAME),
           FAKE_APP_ID, null, startTime - delay * 2 * 1000,
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
index 1ca5bc0..737c2ff 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
@@ -20,7 +20,9 @@ package org.apache.ambari.metrics.core.timeline.discovery;
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URISyntaxException;
+import java.sql.Connection;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -38,9 +40,12 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.ambari.metrics.core.timeline.MetricsSystemInitializationException;
 import org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration;
 import org.apache.ambari.metrics.core.timeline.uuid.MetricUuidGenStrategy;
 import org.apache.ambari.metrics.core.timeline.uuid.MD5UuidGenStrategy;
+import org.apache.ambari.metrics.core.timeline.uuid.Murmur3HashUuidGenStrategy;
+import org.apache.ambari.metrics.core.timeline.uuid.TimelineMetricUuid;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang.ArrayUtils;
@@ -53,13 +58,19 @@ import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
 import org.apache.ambari.metrics.core.timeline.PhoenixHBaseAccessor;
 import org.apache.ambari.metrics.core.timeline.aggregators.TimelineClusterMetric;
-import org.apache.ambari.metrics.core.timeline.uuid.HashBasedUuidGenStrategy;
 
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HBASE_COMPRESSION_SCHEME;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HBASE_ENCODING_SCHEME;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TRANSIENT_METRIC_PATTERNS;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.METRICS_METADATA_SYNC_INIT_DELAY;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.METRICS_METADATA_SYNC_SCHEDULE_DELAY;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_UUID_GEN_STRATEGY;
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRIC_METADATA_FILTERS;
+import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_HOSTED_APPS_METADATA_TABLE_SQL;
+import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_INSTANCE_HOST_TABLE_SQL;
+import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CREATE_METRICS_METADATA_TABLE_SQL;
+import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.DEFAULT_ENCODING;
+import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.DEFAULT_TABLE_COMPRESSION;
 import static org.apache.hadoop.metrics2.sink.timeline.TimelineMetricUtils.getJavaMetricPatterns;
 import static org.apache.hadoop.metrics2.sink.timeline.TimelineMetricUtils.getJavaRegexFromSqlRegex;
 
@@ -67,18 +78,18 @@ public class TimelineMetricMetadataManager {
   private static final Log LOG = LogFactory.getLog(TimelineMetricMetadataManager.class);
   // Cache all metadata on retrieval
   private final Map<TimelineMetricMetadataKey, TimelineMetricMetadata> METADATA_CACHE = new ConcurrentHashMap<>();
-  private final Map<String, TimelineMetricMetadataKey> uuidKeyMap = new ConcurrentHashMap<>();
+  private final Map<TimelineMetricUuid, TimelineMetricMetadataKey> uuidKeyMap = new ConcurrentHashMap<>();
   // Map to lookup apps on a host
   private final Map<String, TimelineMetricHostMetadata> HOSTED_APPS_MAP = new ConcurrentHashMap<>();
-  private final Map<String, String> uuidHostMap = new ConcurrentHashMap<>();
+  private final Map<TimelineMetricUuid, String> uuidHostMap = new ConcurrentHashMap<>();
   private final Map<String, Set<String>> INSTANCE_HOST_MAP = new ConcurrentHashMap<>();
   // Sync only when needed
   AtomicBoolean SYNC_HOSTED_APPS_METADATA = new AtomicBoolean(false);
   AtomicBoolean SYNC_HOSTED_INSTANCES_METADATA = new AtomicBoolean(false);
 
-  private MetricUuidGenStrategy uuidGenStrategy = new HashBasedUuidGenStrategy();
+  private MetricUuidGenStrategy uuidGenStrategy = new Murmur3HashUuidGenStrategy();
   public static final int TIMELINE_METRIC_UUID_LENGTH = 16;
-  public static final int HOSTNAME_UUID_LENGTH = 16;
+  public static int HOSTNAME_UUID_LENGTH = 4;
 
   //Transient metric patterns. No UUID management and aggregation for such metrics.
   private List<String> transientMetricPatterns = new ArrayList<>();
@@ -120,7 +131,54 @@ public class TimelineMetricMetadataManager {
    * Initialize Metadata from the store
    */
   public void initializeMetadata() {
-    metricMetadataSync = new TimelineMetricMetadataSync(this);
+
+    //Create metadata schema
+    Connection conn = null;
+    Statement stmt = null;
+
+    String encoding = metricsConf.get(HBASE_ENCODING_SCHEME, DEFAULT_ENCODING);
+    String compression = metricsConf.get(HBASE_COMPRESSION_SCHEME, DEFAULT_TABLE_COMPRESSION);
+
+    try {
+      LOG.info("Initializing metrics metadata schema...");
+      conn = hBaseAccessor.getConnectionRetryingOnException();
+      stmt = conn.createStatement();
+
+      // Metadata
+      String metadataSql = String.format(CREATE_METRICS_METADATA_TABLE_SQL,
+        encoding, compression);
+      stmt.executeUpdate(metadataSql);
+
+      String hostedAppSql = String.format(CREATE_HOSTED_APPS_METADATA_TABLE_SQL,
+        encoding, compression);
+      stmt.executeUpdate(hostedAppSql);
+
+      //Host Instances table
+      String hostedInstancesSql = String.format(CREATE_INSTANCE_HOST_TABLE_SQL,
+        encoding, compression);
+      stmt.executeUpdate(hostedInstancesSql);
+    } catch (SQLException | InterruptedException sql) {
+      LOG.error("Error creating Metrics Schema in HBase using Phoenix.", sql);
+      throw new MetricsSystemInitializationException(
+        "Error creating Metrics Metadata Schema in HBase using Phoenix.", sql);
+    } finally {
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+    }
+
+      metricMetadataSync = new TimelineMetricMetadataSync(this);
     // Schedule the executor to sync to store
     executorService.scheduleWithFixedDelay(metricMetadataSync,
       metricsConf.getInt(METRICS_METADATA_SYNC_INIT_DELAY, 120), // 2 minutes
@@ -335,14 +393,26 @@ public class TimelineMetricMetadataManager {
     for (TimelineMetricMetadataKey key : METADATA_CACHE.keySet()) {
       TimelineMetricMetadata timelineMetricMetadata = METADATA_CACHE.get(key);
       if (timelineMetricMetadata != null && timelineMetricMetadata.getUuid() != null) {
-        uuidKeyMap.put(new String(timelineMetricMetadata.getUuid()), key);
+        uuidKeyMap.put(new TimelineMetricUuid(timelineMetricMetadata.getUuid()), key);
+      }
+    }
+
+    if (!HOSTED_APPS_MAP.isEmpty()) {
+      Map.Entry<String, TimelineMetricHostMetadata> entry = HOSTED_APPS_MAP.entrySet().iterator().next();
+      TimelineMetricHostMetadata timelineMetricHostMetadata = entry.getValue();
+      if (timelineMetricHostMetadata.getUuid() != null  && timelineMetricHostMetadata.getUuid().length == 16) {
+        HOSTNAME_UUID_LENGTH = 16;
+        uuidGenStrategy = new MD5UuidGenStrategy();
+      } else {
+        HOSTNAME_UUID_LENGTH = 4;
+        uuidGenStrategy = new Murmur3HashUuidGenStrategy();
       }
     }
 
     for (String host : HOSTED_APPS_MAP.keySet()) {
       TimelineMetricHostMetadata timelineMetricHostMetadata = HOSTED_APPS_MAP.get(host);
       if (timelineMetricHostMetadata != null && timelineMetricHostMetadata.getUuid() != null) {
-        uuidHostMap.put(new String(timelineMetricHostMetadata.getUuid()), host);
+        uuidHostMap.put(new TimelineMetricUuid(timelineMetricHostMetadata.getUuid()), host);
       }
     }
   }
@@ -354,11 +424,11 @@ public class TimelineMetricMetadataManager {
    */
   private MetricUuidGenStrategy getUuidStrategy(Configuration configuration) {
     String strategy = configuration.get(TIMELINE_METRICS_UUID_GEN_STRATEGY, "");
-    if ("hash".equalsIgnoreCase(strategy)) {
-      return new HashBasedUuidGenStrategy();
+    if ("md5".equalsIgnoreCase(strategy)){
+      return new MD5UuidGenStrategy();
     } else {
       //Default
-      return new MD5UuidGenStrategy();
+      return new Murmur3HashUuidGenStrategy();
     }
   }
 
@@ -379,14 +449,13 @@ public class TimelineMetricMetadataManager {
     }
 
     if (!createIfNotPresent) {
-      LOG.warn("UUID not found for " + hostname + ", createIfNotPresent is false");
+      LOG.debug("UUID not found for " + hostname + ", createIfNotPresent is false");
       return null;
     }
 
     byte[] uuid = uuidGenStrategy.computeUuid(hostname, HOSTNAME_UUID_LENGTH);
-    String uuidStr = new String(uuid);
-    if (uuidHostMap.containsKey(uuidStr)) {
-      LOG.error("Duplicate key computed for " + hostname +", Collides with  " + uuidHostMap.get(uuidStr));
+    if (uuidHostMap.containsKey(new TimelineMetricUuid(uuid))) {
+      LOG.error("Duplicate key computed for " + hostname +", Collides with  " + uuidHostMap.get(uuid));
       return null;
     }
 
@@ -395,7 +464,7 @@ public class TimelineMetricMetadataManager {
       HOSTED_APPS_MAP.put(hostname, timelineMetricHostMetadata);
     }
     timelineMetricHostMetadata.setUuid(uuid);
-    uuidHostMap.put(uuidStr, hostname);
+    uuidHostMap.put(new TimelineMetricUuid(uuid), hostname);
 
     return uuid;
   }
@@ -420,17 +489,16 @@ public class TimelineMetricMetadataManager {
     }
 
     if (!createIfNotPresent) {
-      LOG.warn("UUID not found for " + key + ", createIfNotPresent is false");
+      LOG.debug("UUID not found for " + key + ", createIfNotPresent is false");
       return null;
     }
 
-    byte[] uuid = uuidGenStrategy.computeUuid(timelineClusterMetric, TIMELINE_METRIC_UUID_LENGTH);
+    byte[] uuidBytes = uuidGenStrategy.computeUuid(timelineClusterMetric, TIMELINE_METRIC_UUID_LENGTH);
 
-    String uuidStr = new String(uuid);
-    if (uuidKeyMap.containsKey(uuidStr) && !uuidKeyMap.get(uuidStr).equals(key)) {
-      TimelineMetricMetadataKey collidingKey = (TimelineMetricMetadataKey)uuidKeyMap.get(uuidStr);
-      LOG.error("Duplicate key " + Arrays.toString(uuid) + "(" + uuid +  ") computed for " + timelineClusterMetric.toString()
-        + ", Collides with  " + collidingKey.toString());
+    TimelineMetricUuid uuid = new TimelineMetricUuid(uuidBytes);
+    if (uuidKeyMap.containsKey(uuid) && !uuidKeyMap.get(uuid).equals(key)) {
+      TimelineMetricMetadataKey collidingKey = uuidKeyMap.get(uuid);
+      LOG.error("Duplicate key " + uuid + " computed for " + timelineClusterMetric + ", Collides with  " + collidingKey);
       return null;
     }
 
@@ -442,10 +510,10 @@ public class TimelineMetricMetadataManager {
       METADATA_CACHE.put(key, timelineMetricMetadata);
     }
 
-    timelineMetricMetadata.setUuid(uuid);
+    timelineMetricMetadata.setUuid(uuid.uuid);
     timelineMetricMetadata.setIsPersisted(false);
-    uuidKeyMap.put(uuidStr, key);
-    return uuid;
+    uuidKeyMap.put(uuid, key);
+    return uuid.uuid;
   }
 
   /**
@@ -484,14 +552,14 @@ public class TimelineMetricMetadataManager {
     return metricUuid;
   }
 
-  public String getMetricNameFromUuid(byte[]  uuid) {
+  public String getMetricNameFromUuid(byte[] uuid) {
 
     byte[] metricUuid = uuid;
     if (uuid.length == TIMELINE_METRIC_UUID_LENGTH + HOSTNAME_UUID_LENGTH) {
       metricUuid = ArrayUtils.subarray(uuid, 0, TIMELINE_METRIC_UUID_LENGTH);
     }
 
-    TimelineMetricMetadataKey key = uuidKeyMap.get(new String(metricUuid));
+    TimelineMetricMetadataKey key = uuidKeyMap.get(new TimelineMetricUuid(metricUuid));
     return key != null ? key.getMetricName() : null;
   }
 
@@ -506,11 +574,11 @@ public class TimelineMetricMetadataManager {
     }
 
     if (uuid.length == TIMELINE_METRIC_UUID_LENGTH) {
-      TimelineMetricMetadataKey key = uuidKeyMap.get(new String(uuid));
+      TimelineMetricMetadataKey key = uuidKeyMap.get(new TimelineMetricUuid(uuid));
       return key != null ? new TimelineMetric(key.metricName, null, key.appId, key.instanceId) : null;
     } else {
       byte[] metricUuid = ArrayUtils.subarray(uuid, 0, TIMELINE_METRIC_UUID_LENGTH);
-      TimelineMetricMetadataKey key = uuidKeyMap.get(new String(metricUuid));
+      TimelineMetricMetadataKey key = uuidKeyMap.get(new TimelineMetricUuid(metricUuid));
       if (key == null) {
         LOG.error("TimelineMetricMetadataKey is null for : " + Arrays.toString(uuid));
         return null;
@@ -521,7 +589,7 @@ public class TimelineMetricMetadataManager {
       timelineMetric.setInstanceId(key.instanceId);
 
       byte[] hostUuid = ArrayUtils.subarray(uuid, TIMELINE_METRIC_UUID_LENGTH, HOSTNAME_UUID_LENGTH + TIMELINE_METRIC_UUID_LENGTH);
-      timelineMetric.setHostName(uuidHostMap.get(new String(hostUuid)));
+      timelineMetric.setHostName(uuidHostMap.get(new TimelineMetricUuid(hostUuid)));
       return timelineMetric;
     }
   }
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/query/PhoenixTransactSQL.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/query/PhoenixTransactSQL.java
index f76933a..e0cc642 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/query/PhoenixTransactSQL.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/query/PhoenixTransactSQL.java
@@ -43,8 +43,11 @@ public class PhoenixTransactSQL {
   /**
    * Create table to store individual metric records.
    */
+
+  public static final String METRICS_RECORD_TABLE_NAME = "METRIC_RECORD_UUID";
+
   public static final String CREATE_METRICS_TABLE_SQL = "CREATE TABLE IF NOT " +
-    "EXISTS METRIC_RECORD_UUID (UUID BINARY(32) NOT NULL, " +
+    "EXISTS " + METRICS_RECORD_TABLE_NAME + " (UUID BINARY(20) NOT NULL, " +
     "SERVER_TIME BIGINT NOT NULL, " +
     "METRIC_SUM DOUBLE, " +
     "METRIC_COUNT UNSIGNED_INT, " +
@@ -83,7 +86,7 @@ public class PhoenixTransactSQL {
 
   public static final String CREATE_METRICS_AGGREGATE_TABLE_SQL =
     "CREATE TABLE IF NOT EXISTS %s " +
-      "(UUID BINARY(32) NOT NULL, " +
+      "(UUID BINARY(20) NOT NULL, " +
       "SERVER_TIME BIGINT NOT NULL, " +
       "METRIC_SUM DOUBLE," +
       "METRIC_COUNT UNSIGNED_INT, " +
@@ -155,7 +158,7 @@ public class PhoenixTransactSQL {
 
   public static final String CREATE_HOSTED_APPS_METADATA_TABLE_SQL =
     "CREATE TABLE IF NOT EXISTS HOSTED_APPS_METADATA_UUID " +
-      "(HOSTNAME VARCHAR, UUID BINARY(16), APP_IDS VARCHAR, " +
+      "(HOSTNAME VARCHAR, UUID BINARY(4), APP_IDS VARCHAR, " +
       "CONSTRAINT pk PRIMARY KEY (HOSTNAME))" +
       "DATA_BLOCK_ENCODING='%s', COMPRESSION='%s'";
 
@@ -165,9 +168,6 @@ public class PhoenixTransactSQL {
       "CONSTRAINT pk PRIMARY KEY (INSTANCE_ID, HOSTNAME))" +
       "DATA_BLOCK_ENCODING='%s', COMPRESSION='%s'";
 
-  public static final String ALTER_METRICS_METADATA_TABLE =
-    "ALTER TABLE METRICS_METADATA_UUID ADD IF NOT EXISTS IS_WHITELISTED BOOLEAN";
-
   ////////////////////////////////
 
   /**
@@ -442,8 +442,6 @@ public class PhoenixTransactSQL {
   public static final String METRICS_CLUSTER_AGGREGATE_DAILY_V1_TABLE_NAME =
     "METRIC_AGGREGATE_DAILY";
 
-  public static final String METRICS_RECORD_TABLE_NAME = "METRIC_RECORD_UUID";
-
   public static final String METRICS_AGGREGATE_MINUTE_TABLE_NAME =
     "METRIC_RECORD_MINUTE_UUID";
   public static final String METRICS_AGGREGATE_HOURLY_TABLE_NAME =
@@ -459,7 +457,7 @@ public class PhoenixTransactSQL {
   public static final String METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME =
     "METRIC_AGGREGATE_DAILY_UUID";
 
-  public static final Pattern PHOENIX_TABLES_REGEX_PATTERN = Pattern.compile("METRIC_.*_UUID");
+  public static final Pattern PHOENIX_TABLES_REGEX_PATTERN = Pattern.compile("METRIC_.*");
 
   public static final String[] PHOENIX_TABLES = {
     METRICS_RECORD_TABLE_NAME,
@@ -469,7 +467,9 @@ public class PhoenixTransactSQL {
     METRICS_CLUSTER_AGGREGATE_TABLE_NAME,
     METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME,
     METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME,
-    METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME
+    METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME,
+    METRIC_TRANSIENT_TABLE_NAME,
+    CONTAINER_METRICS_TABLE_NAME
   };
 
   public static final String DEFAULT_TABLE_COMPRESSION = "SNAPPY";
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java
new file mode 100644
index 0000000..9418aa4
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.metrics.core.timeline.uuid;
+
+import com.google.common.hash.Hashing;
+import org.apache.ambari.metrics.core.timeline.aggregators.TimelineClusterMetric;
+import org.apache.commons.lang.StringUtils;
+
+public class Murmur3HashUuidGenStrategy implements MetricUuidGenStrategy{
+
+  @Override
+  public byte[] computeUuid(TimelineClusterMetric timelineClusterMetric, int maxLength) {
+
+    String metricString = timelineClusterMetric.getMetricName() + timelineClusterMetric.getAppId();
+    if (StringUtils.isNotEmpty(timelineClusterMetric.getInstanceId())) {
+      metricString += timelineClusterMetric.getInstanceId();
+    }
+    byte[] metricBytes = metricString.getBytes();
+    return Hashing.murmur3_128().hashBytes(metricBytes).asBytes();
+  }
+
+  @Override
+  public byte[] computeUuid(String value, int maxLength) {
+    byte[] valueBytes = value.getBytes();
+    return Hashing.murmur3_32().hashBytes(valueBytes).asBytes();
+  }
+}
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuid.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuid.java
new file mode 100644
index 0000000..7907ff6
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuid.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.metrics.core.timeline.uuid;
+
+import java.util.Arrays;
+
+public class TimelineMetricUuid {
+  public byte[] uuid;
+
+  public TimelineMetricUuid(byte[] uuid) {
+    this.uuid = uuid;
+  }
+
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(uuid);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+
+    if (this == o) {
+      return false;
+    }
+
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    TimelineMetricUuid that = (TimelineMetricUuid) o;
+
+    return Arrays.equals(this.uuid, that.uuid);
+  }
+
+  @Override
+  public String toString() {
+    return Arrays.toString(uuid);
+  }
+}
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/webapp/TimelineWebServices.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/webapp/TimelineWebServices.java
index 9c88b1a..3bcbaf6 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/webapp/TimelineWebServices.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/webapp/TimelineWebServices.java
@@ -144,8 +144,8 @@ public class TimelineWebServices {
       // TODO: Check ACLs for MetricEntity using the TimelineACLManager.
       // TODO: Save owner of the MetricEntity.
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Storing metrics: " +
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Storing metrics: " +
           TimelineUtils.dumpTimelineRecordtoJSON(metrics, true));
       }
 
@@ -175,8 +175,8 @@ public class TimelineWebServices {
     }
 
     try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Storing aggregated metrics: " +
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Storing aggregated metrics: " +
                 TimelineUtils.dumpTimelineRecordtoJSON(metrics, true));
       }
 
@@ -200,8 +200,8 @@ public class TimelineWebServices {
     }
 
     try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Storing container metrics: " + TimelineUtils
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Storing container metrics: " + TimelineUtils
             .dumpTimelineRecordtoJSON(metrics, true));
       }
 
@@ -250,8 +250,8 @@ public class TimelineWebServices {
   ) {
     init(res);
     try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Request for metrics => metricNames: " + metricNames + ", " +
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Request for metrics => metricNames: " + metricNames + ", " +
           "appId: " + appId + ", instanceId: " + instanceId + ", " +
           "hostname: " + hostname + ", startTime: " + startTime + ", " +
           "endTime: " + endTime + ", " +
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/AMSSMOKETESTFAKE.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/AMSSMOKETESTFAKE.dat
new file mode 100644
index 0000000..f5c181a
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/AMSSMOKETESTFAKE.dat
@@ -0,0 +1 @@
+AMBARI_METRICS.SmokeTest.FakeMetric
\ No newline at end of file
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/MASTER_HBASE.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/HBASE_MASTER.dat
similarity index 100%
rename from ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/MASTER_HBASE.dat
rename to ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/HBASE_MASTER.dat
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/SLAVE_HBASE.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/HBASE_REGIONSERVER.dat
similarity index 100%
rename from ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/SLAVE_HBASE.dat
rename to ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/HBASE_REGIONSERVER.dat
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/TIMELINE_METRIC_STORE_WATCHER.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/TIMELINE_METRIC_STORE_WATCHER.dat
new file mode 100644
index 0000000..af73d02
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/TIMELINE_METRIC_STORE_WATCHER.dat
@@ -0,0 +1 @@
+TimelineMetricStoreWatcher.FakeMetric
\ No newline at end of file
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/AbstractMiniHBaseClusterTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/AbstractMiniHBaseClusterTest.java
index 258054c..26078cb 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/AbstractMiniHBaseClusterTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/AbstractMiniHBaseClusterTest.java
@@ -18,6 +18,7 @@
 package org.apache.ambari.metrics.core.timeline;
 
 import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.OUT_OFF_BAND_DATA_TIME_ALLOWANCE;
+import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.UPSERT_METRICS_SQL;
 import static org.apache.phoenix.end2end.ParallelStatsDisabledIT.tearDownMiniCluster;
@@ -100,8 +101,12 @@ public abstract class AbstractMiniHBaseClusterTest extends BaseTest {
     // inits connection, starts mini cluster
     conn = getConnection(getUrl());
 
+    Configuration metricsConf = new Configuration();
+    metricsConf.set(TimelineMetricConfiguration.HBASE_COMPRESSION_SCHEME, "NONE");
+
+    metadataManager = new TimelineMetricMetadataManager(metricsConf, hdb);
+    metadataManager.initializeMetadata();
     hdb.initMetricSchema();
-    metadataManager = new TimelineMetricMetadataManager(new Configuration(), hdb);
     hdb.setMetadataInstance(metadataManager);
   }
 
@@ -206,6 +211,8 @@ public abstract class AbstractMiniHBaseClusterTest extends BaseTest {
     metricsConf.set("timeline.metrics.transient.metric.patterns", "topology%");
     // Unit tests insert values into the future
     metricsConf.setLong(OUT_OFF_BAND_DATA_TIME_ALLOWANCE, 600000);
+    metricsConf.set("timeline.metrics." + METRICS_RECORD_TABLE_NAME + ".durability", "SKIP_WAL");
+    metricsConf.set("timeline.metrics." + METRICS_CLUSTER_AGGREGATE_TABLE_NAME + ".durability", "ASYNC_WAL");
 
     return
       new PhoenixHBaseAccessor(new TimelineMetricConfiguration(new Configuration(), metricsConf),
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/ITPhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/ITPhoenixHBaseAccessor.java
index 65b5a1b..20fbc58 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/ITPhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/ITPhoenixHBaseAccessor.java
@@ -19,11 +19,13 @@ package org.apache.ambari.metrics.core.timeline;
 
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertTrue;
-import static org.apache.ambari.metrics.core.timeline.PhoenixHBaseAccessor.DATE_TIERED_COMPACTION_POLICY;
-import static org.apache.ambari.metrics.core.timeline.PhoenixHBaseAccessor.FIFO_COMPACTION_POLICY_CLASS;
-import static org.apache.ambari.metrics.core.timeline.PhoenixHBaseAccessor.HSTORE_COMPACTION_CLASS_KEY;
-import static org.apache.ambari.metrics.core.timeline.PhoenixHBaseAccessor.HSTORE_ENGINE_CLASS;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.DATE_TIERED_COMPACTION_POLICY;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.FIFO_COMPACTION_POLICY_CLASS;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HSTORE_COMPACTION_CLASS_KEY;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.HSTORE_ENGINE_CLASS;
+import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.CONTAINER_METRICS_TABLE_NAME;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
+import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.PHOENIX_TABLES;
 import static org.apache.ambari.metrics.core.timeline.query.PhoenixTransactSQL.PHOENIX_TABLES_REGEX_PATTERN;
@@ -42,6 +44,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 
+import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -367,28 +370,30 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
     precisionValues.put(METRICS_RECORD_TABLE_NAME, precisionTtl);
     f.set(hdb, precisionValues);
 
-    Field f2 = PhoenixHBaseAccessor.class.getDeclaredField("timelineMetricsTablesDurability");
-    f2.setAccessible(true);
-    f2.set(hdb, "ASYNC_WAL");
-
     hdb.initPoliciesAndTTL();
 
     // Verify expected policies are set
     boolean normalizerEnabled = false;
     String precisionTableCompactionPolicy = null;
     String aggregateTableCompactionPolicy = null;
-    boolean tableDurabilitySet  = false;
-    for (int i = 0; i < 10; i++) {
+    boolean precisionTableDurabilitySet  = false;
+    boolean aggregateTableDurabilitySet  = false;
+
+    boolean isComplete = false;
+
+    for (int i = 0; i < 10 && !isComplete; i++) {
       LOG.warn("Policy check retry : " + i);
       for (String tableName : PHOENIX_TABLES) {
         TableName[] tableNames = hBaseAdmin.listTableNames(PHOENIX_TABLES_REGEX_PATTERN, false);
+        TableName[] containerMetricsTableName = hBaseAdmin.listTableNames(CONTAINER_METRICS_TABLE_NAME, false);
+        tableNames = (TableName[]) ArrayUtils.addAll(tableNames, containerMetricsTableName);
+
         Optional<TableName> tableNameOptional = Arrays.stream(tableNames)
           .filter(t -> tableName.equals(t.getNameAsString())).findFirst();
 
         TableDescriptor tableDescriptor = hBaseAdmin.getTableDescriptor(tableNameOptional.get());
         
         normalizerEnabled = tableDescriptor.isNormalizationEnabled();
-        tableDurabilitySet = (Durability.ASYNC_WAL.equals(tableDescriptor.getDurability()));
         if (tableName.equals(METRICS_RECORD_TABLE_NAME)) {
           precisionTableCompactionPolicy = tableDescriptor.getValue(HSTORE_COMPACTION_CLASS_KEY);
         } else {
@@ -398,17 +403,25 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
         // Best effort for 20 seconds
         if (normalizerEnabled || (precisionTableCompactionPolicy == null && aggregateTableCompactionPolicy == null)) {
           Thread.sleep(2000l);
+        } else {
+          isComplete = true;
         }
         if (tableName.equals(METRICS_RECORD_TABLE_NAME)) {
+          precisionTableDurabilitySet = (Durability.SKIP_WAL.equals(tableDescriptor.getDurability()));
           for (ColumnFamilyDescriptor family : tableDescriptor.getColumnFamilies()) {
             precisionTtl = family.getTimeToLive();
           }
         }
+
+        if (tableName.equals(METRICS_CLUSTER_AGGREGATE_TABLE_NAME)) {
+          aggregateTableDurabilitySet = (Durability.ASYNC_WAL.equals(tableDescriptor.getDurability()));
+        }
       }
     }
 
     Assert.assertFalse("Normalizer disabled.", normalizerEnabled);
-    Assert.assertTrue("Durability Set.", tableDurabilitySet);
+    Assert.assertTrue("METRIC_RECORD_UUID Durability Set.", precisionTableDurabilitySet);
+    Assert.assertTrue("METRIC_AGGREGATE_UUID Durability Set.", aggregateTableDurabilitySet);
     Assert.assertEquals("FIFO compaction policy is set for METRIC_RECORD_UUID.", FIFO_COMPACTION_POLICY_CLASS, precisionTableCompactionPolicy);
     Assert.assertEquals("FIFO compaction policy is set for aggregate tables", DATE_TIERED_COMPACTION_POLICY, aggregateTableCompactionPolicy);
     Assert.assertEquals("Precision TTL value as expected.", 86400, precisionTtl);
@@ -441,7 +454,7 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
     metric.setExitCode(0);
     List<ContainerMetric> list = Arrays.asList(metric);
     hdb.insertContainerMetrics(list);
-    PreparedStatement stmt = conn.prepareStatement("SELECT * FROM CONTAINER_METRICS_UUID");
+    PreparedStatement stmt = conn.prepareStatement("SELECT * FROM CONTAINER_METRICS");
     ResultSet set = stmt.executeQuery();
     // check each filed is set properly when read back.
     boolean foundRecord = false;
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessorTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessorTest.java
index 9d1b2a4..63ec59e 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessorTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessorTest.java
@@ -109,7 +109,7 @@ public class PhoenixHBaseAccessorTest {
 
     mockStatic(PhoenixTransactSQL.class);
     PreparedStatement preparedStatementMock = EasyMock.createNiceMock(PreparedStatement.class);
-    Condition condition = new DefaultCondition(Collections.singletonList(new byte[32]), metricNames, hostnames, "appid", "instanceid", 123L, 234L, Precision.SECONDS, 10, true);
+    Condition condition = new DefaultCondition(Collections.singletonList(new byte[20]), metricNames, hostnames, "appid", "instanceid", 123L, 234L, Precision.SECONDS, 10, true);
     expect(PhoenixTransactSQL.prepareGetMetricsSqlStmt(null, condition)).andReturn(preparedStatementMock).once();
     ResultSet rsMock = EasyMock.createNiceMock(ResultSet.class);
     expect(preparedStatementMock.executeQuery()).andReturn(rsMock);
@@ -138,7 +138,7 @@ public class PhoenixHBaseAccessorTest {
 
     mockStatic(PhoenixTransactSQL.class);
     PreparedStatement preparedStatementMock = EasyMock.createNiceMock(PreparedStatement.class);
-    Condition condition = new DefaultCondition(Collections.singletonList(new byte[32]), metricNames, hostnames, "appid", "instanceid", 123L, 234L, Precision.SECONDS, 10, true);
+    Condition condition = new DefaultCondition(Collections.singletonList(new byte[20]), metricNames, hostnames, "appid", "instanceid", 123L, 234L, Precision.SECONDS, 10, true);
     expect(PhoenixTransactSQL.prepareGetMetricsSqlStmt(null, condition)).andReturn(preparedStatementMock).once();
     ResultSet rsMock = EasyMock.createNiceMock(ResultSet.class);
     RuntimeException runtimeException = EasyMock.createNiceMock(RuntimeException.class);
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java
new file mode 100644
index 0000000..4d663cc
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.metrics.core.timeline;
+
+import org.apache.ambari.metrics.core.timeline.aggregators.TimelineClusterMetric;
+import org.apache.ambari.metrics.core.timeline.discovery.TimelineMetricMetadataManager;
+import org.apache.hadoop.conf.Configuration;
+import org.easymock.EasyMock;
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS;
+import static org.apache.ambari.metrics.core.timeline.TimelineMetricConfiguration.TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS;
+import static org.easymock.EasyMock.anyBoolean;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+
+
+public class TimelineMetricSplitPointComputerTest {
+
+  @Test
+  public void testSplitPointComputationForBasicCluster() {
+
+    /**
+     *  HBase Total heap = 1G.
+     *  HDFS,HBASE,YARN services deployed.
+     */
+    Configuration metricsConfMock = EasyMock.createMock(Configuration.class);
+
+    expect(metricsConfMock.get(TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS, "")).
+      andReturn("METRICS_COLLECTOR,AMBARI_SERVER,NAMENODE,RESOURCEMANAGER").once();
+    expect(metricsConfMock.get(TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS, "")).
+      andReturn("METRICS_MONITOR,DATANODE,NODEMANAGER,HBASE_REGIONSERVER").once();
+    expect(metricsConfMock.getDouble("hbase_total_heapsize", 1024*1024*1024)).andReturn(1024 * 1024 * 1024.0).once();
+
+    Configuration hbaseConfMock = EasyMock.createMock(Configuration.class);
+    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5)).andReturn(0.5).once();
+    expect(hbaseConfMock.getDouble("hbase.hregion.memstore.flush.size", 134217728)).andReturn(134217728.0).once();
+
+    TimelineMetricMetadataManager metricMetadataManagerMock = EasyMock.createNiceMock(TimelineMetricMetadataManager.class);
+    expect(metricMetadataManagerMock.getUuid(anyObject(TimelineClusterMetric.class), anyBoolean())).andReturn(new byte[16]);
+
+    replay(metricsConfMock, hbaseConfMock, metricMetadataManagerMock);
+
+    TimelineMetricSplitPointComputer timelineMetricSplitPointComputer = new TimelineMetricSplitPointComputer(metricsConfMock,
+      hbaseConfMock,
+      metricMetadataManagerMock);
+
+    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 3);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 1);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 1);
+  }
+
+  @Test
+  public void testSplitPointComputationForMediumCluster() {
+
+    /**
+     *  HBase Total heap = 8G.
+     *  All services deployed.
+     */
+    Configuration metricsConfMock = EasyMock.createMock(Configuration.class);
+
+    expect(metricsConfMock.get(TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS, "")).
+      andReturn("METRICS_COLLECTOR,AMBARI_SERVER,NAMENODE,RESOURCEMANAGER," +
+        "NIMBUS,HIVESERVER2,HIVEMETASTORE,HBASE_MASTER,KAFKA_BROKER").once();
+    expect(metricsConfMock.get(TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS, "")).
+      andReturn("METRICS_MONITOR,DATANODE,NODEMANAGER,HBASE_REGIONSERVER").once();
+    expect(metricsConfMock.getDouble("hbase_total_heapsize", 1024*1024*1024)).andReturn(8589934592.0).once();
+
+    Configuration hbaseConfMock = EasyMock.createMock(Configuration.class);
+    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5)).andReturn(0.5).once();
+    expect(hbaseConfMock.getDouble("hbase.hregion.memstore.flush.size", 134217728)).andReturn(134217728.0).once();
+
+    TimelineMetricMetadataManager metricMetadataManagerMock = EasyMock.createNiceMock(TimelineMetricMetadataManager.class);
+    expect(metricMetadataManagerMock.getUuid(anyObject(TimelineClusterMetric.class), anyBoolean())).andReturn(new byte[16]);
+
+    replay(metricsConfMock, hbaseConfMock, metricMetadataManagerMock);
+
+    TimelineMetricSplitPointComputer timelineMetricSplitPointComputer = new TimelineMetricSplitPointComputer(metricsConfMock,
+      hbaseConfMock,
+      metricMetadataManagerMock);
+
+    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 16);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 3);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 3);
+  }
+
+  @Test
+  public void testSplitPointComputationForLargeCluster() {
+
+    /**
+     *  HBase Total heap = 24G.
+     *  All services deployed.
+     */
+    Configuration metricsConfMock = EasyMock.createMock(Configuration.class);
+
+    expect(metricsConfMock.get(TIMELINE_METRIC_INITIAL_CONFIGURED_MASTER_COMPONENTS, "")).
+      andReturn("METRICS_COLLECTOR,AMBARI_SERVER,NAMENODE,RESOURCEMANAGER," +
+        "NIMBUS,HIVESERVER2,HIVEMETASTORE,HBASE_MASTER,KAFKA_BROKER").once();
+    expect(metricsConfMock.get(TIMELINE_METRIC_INITIAL_CONFIGURED_SLAVE_COMPONENTS, "")).
+      andReturn("METRICS_MONITOR,DATANODE,NODEMANAGER,HBASE_REGIONSERVER").once();
+    expect(metricsConfMock.getDouble("hbase_total_heapsize", 1024*1024*1024)).andReturn(24 * 1024 * 1024 * 1024.0).once();
+
+    Configuration hbaseConfMock = EasyMock.createMock(Configuration.class);
+    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5)).andReturn(0.5).once();
+    expect(hbaseConfMock.getDouble("hbase.hregion.memstore.flush.size", 134217728)).andReturn(2 * 134217728.0).once();
+
+    TimelineMetricMetadataManager metricMetadataManagerMock = EasyMock.createNiceMock(TimelineMetricMetadataManager.class);
+    expect(metricMetadataManagerMock.getUuid(anyObject(TimelineClusterMetric.class), anyBoolean())).andReturn(new byte[16]);
+
+    replay(metricsConfMock, hbaseConfMock, metricMetadataManagerMock);
+
+    TimelineMetricSplitPointComputer timelineMetricSplitPointComputer = new TimelineMetricSplitPointComputer(metricsConfMock,
+      hbaseConfMock,
+      metricMetadataManagerMock);
+
+    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 28);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 6);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 6);
+  }
+}
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcherTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcherTest.java
index de0236c..eb64198 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcherTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricStoreWatcherTest.java
@@ -48,10 +48,10 @@ public class TimelineMetricStoreWatcherTest {
 
   @Test
   public void testRunPositive() throws Exception {
-    TimelineMetricStore metricStore = createNiceMock(TimelineMetricStore.class);
+    HBaseTimelineMetricsService metricStore = createNiceMock(HBaseTimelineMetricsService.class);
 
-    expect(metricStore.putMetrics(anyObject(TimelineMetrics.class)))
-      .andReturn(new TimelinePutResponse());
+    metricStore.putMetricsSkipCache(anyObject(TimelineMetrics.class));
+    expectLastCall().once();
 
     // metric found
     expect(metricStore.getTimelineMetrics(EasyMock.<List<String>>anyObject(),
@@ -75,7 +75,7 @@ public class TimelineMetricStoreWatcherTest {
 
   @Test
   public void testRunNegative() throws Exception {
-    TimelineMetricStore metricStore = createNiceMock(TimelineMetricStore.class);
+    HBaseTimelineMetricsService metricStore = createNiceMock(HBaseTimelineMetricsService.class);
 
     expect(metricStore.putMetrics(anyObject(TimelineMetrics.class)))
       .andReturn(new TimelinePutResponse());
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/discovery/TestMetadataManager.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/discovery/TestMetadataManager.java
index 2f2b0b5..28bb75e 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/discovery/TestMetadataManager.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/discovery/TestMetadataManager.java
@@ -160,7 +160,7 @@ public class TestMetadataManager extends AbstractMiniHBaseClusterTest {
 
     byte[] uuid = metadataManager.getUuid(timelineMetric, true);
     Assert.assertNotNull(uuid);
-    Assert.assertEquals(uuid.length, 32);
+    Assert.assertEquals(uuid.length, 20);
 
     byte[] uuidWithoutHost = metadataManager.getUuid(new TimelineClusterMetric(timelineMetric.getMetricName(), timelineMetric.getAppId(), timelineMetric.getInstanceId(), -1), true);
     Assert.assertNotNull(uuidWithoutHost);
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuidManagerTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/MetricUuidGenStrategyTest.java
similarity index 59%
rename from ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuidManagerTest.java
rename to ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/MetricUuidGenStrategyTest.java
index e4018bb..a25310b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/TimelineMetricUuidManagerTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/uuid/MetricUuidGenStrategyTest.java
@@ -20,6 +20,7 @@ package org.apache.ambari.metrics.core.timeline.uuid;
 
 import org.apache.ambari.metrics.core.timeline.aggregators.TimelineClusterMetric;
 import org.junit.Assert;
+import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
@@ -37,39 +38,30 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-public class TimelineMetricUuidManagerTest {
+public class MetricUuidGenStrategyTest {
 
 
-  private List<String> apps = Arrays.asList("namenode",
-    "datanode", "master_hbase", "slave_hbase", "kafka_broker", "nimbus", "ams-hbase",
+  private static List<String> apps = Arrays.asList("namenode",
+    "datanode", "hbase_master", "hbase_regionserver", "kafka_broker", "nimbus", "ams-hbase",
     "accumulo", "nodemanager", "resourcemanager", "ambari_server", "HOST", "timeline_metric_store_watcher",
-    "jobhistoryserver", "hiveserver2", "hivemetastore", "applicationhistoryserver", "amssmoketestfake");
+    "jobhistoryserver", "hiveserver2", "hivemetastore", "applicationhistoryserver", "amssmoketestfake", "llapdaemon");
 
-  private Map<String, Set<String>> metricSet  = new HashMap<>(populateMetricWhitelistFromFile());
+  private static Map<String, Set<String>> metricSet  = new HashMap<>();
 
-  @Test
-  @Ignore("Collisions possible")
-  public void testHashBasedUuidForMetricName() throws SQLException {
-
-    MetricUuidGenStrategy strategy = new HashBasedUuidGenStrategy();
-    Map<String, TimelineClusterMetric> uuids = new HashMap<>();
-    for (String app : metricSet.keySet()) {
-      Set<String> metrics = metricSet.get(app);
-      for (String metric : metrics) {
-        TimelineClusterMetric timelineClusterMetric = new TimelineClusterMetric(metric, app, null, -1l);
-        byte[] uuid = strategy.computeUuid(timelineClusterMetric, 16);
-        Assert.assertNotNull(uuid);
-        Assert.assertTrue(uuid.length == 16);
-        String uuidStr = new String(uuid);
-        Assert.assertFalse(uuids.containsKey(uuidStr) && !uuids.containsValue(timelineClusterMetric));
-        uuids.put(uuidStr, timelineClusterMetric);
-      }
-    }
+  @BeforeClass
+  public static void init() {
+    metricSet  = new HashMap<>(populateMetricWhitelistFromFile());
   }
 
   @Test
-  public void testHaseBasedUuidForAppIds() throws SQLException {
+  @Ignore
+  public void testHashBasedUuid() throws SQLException {
+    testMetricCollisionsForUuidGenStrategy(new HashBasedUuidGenStrategy(), 16);
+  }
 
+  @Test
+  @Ignore
+  public void testHashBasedUuidForAppIds() throws SQLException {
     MetricUuidGenStrategy strategy = new HashBasedUuidGenStrategy();
     Map<String, TimelineClusterMetric> uuids = new HashMap<>();
     for (String app : metricSet.keySet()) {
@@ -82,53 +74,64 @@ public class TimelineMetricUuidManagerTest {
   }
 
   @Test
+  @Ignore
   public void testHashBasedUuidForHostnames() throws SQLException {
+    testHostCollisionsForUuidGenStrategy(new HashBasedUuidGenStrategy(), 16);
+  }
 
-    MetricUuidGenStrategy strategy = new HashBasedUuidGenStrategy();
-    Map<String, String> uuids = new HashMap<>();
 
-    List<String> hosts = new ArrayList<>();
-    String hostPrefix = "TestHost.";
-    String hostSuffix = ".ambari.apache.org";
+  @Test
+  public void testMD5BasedUuid() throws SQLException {
+    testMetricCollisionsForUuidGenStrategy(new MD5UuidGenStrategy(), 16);
 
-    for (int i=0; i<=2000; i++) {
-      hosts.add(hostPrefix + i + hostSuffix);
-    }
+  }
 
-    for (String host : hosts) {
-      byte[] uuid = strategy.computeUuid(host, 16);
-      Assert.assertNotNull(uuid);
-      Assert.assertTrue(uuid.length == 16);
-      String uuidStr = new String(uuid);
-      Assert.assertFalse(uuids.containsKey(uuidStr));
-      uuids.put(uuidStr, host);
-    }
+  @Test
+  public void testMD5BasedUuidForHostnames() throws SQLException {
+    testHostCollisionsForUuidGenStrategy(new MD5UuidGenStrategy(), 16);
+  }
+
+
+  @Test
+  public void testMD5ConsistentHashing() throws SQLException, InterruptedException {
+    testConsistencyForUuidGenStrategy(new MD5UuidGenStrategy(), 16);
   }
 
 
   @Test
-  public void testRandomUuidForWhitelistedMetrics() throws SQLException {
+  public void testMurmur3HashUuid() throws SQLException {
+    testMetricCollisionsForUuidGenStrategy(new Murmur3HashUuidGenStrategy(), 16);
+  }
 
-    MetricUuidGenStrategy strategy = new MD5UuidGenStrategy();
-    Map<String, String> uuids = new HashMap<>();
+  @Test
+  public void testMurmur3HashingBasedUuidForHostnames() throws SQLException {
+    testHostCollisionsForUuidGenStrategy(new Murmur3HashUuidGenStrategy(), 4);
+  }
+
+  @Test
+  public void testMurmur3ConsistentHashing() throws SQLException, InterruptedException {
+    testConsistencyForUuidGenStrategy(new Murmur3HashUuidGenStrategy(), 4);
+  }
+
+  private void testMetricCollisionsForUuidGenStrategy(MetricUuidGenStrategy strategy, int uuidLength) {
+    Map<TimelineMetricUuid, TimelineClusterMetric> uuids = new HashMap<>();
     for (String app : metricSet.keySet()) {
       Set<String> metrics = metricSet.get(app);
-      for (String metric : metrics) {
-        byte[] uuid = strategy.computeUuid(new TimelineClusterMetric(metric, app, null, -1l), 16);
+      for (String m : metrics) {
+        TimelineClusterMetric metric = new TimelineClusterMetric(m, app, null, -1l);
+        byte[] uuid = strategy.computeUuid(metric, uuidLength);
         Assert.assertNotNull(uuid);
-        Assert.assertTrue(uuid.length == 16);
-        String uuidStr = new String(uuid);
+        Assert.assertTrue(uuid.length == uuidLength);
+        TimelineMetricUuid uuidStr = new TimelineMetricUuid(uuid);
         Assert.assertFalse(uuids.containsKey(uuidStr) && !uuids.containsValue(metric));
         uuids.put(uuidStr, metric);
       }
     }
   }
 
-  @Test
-  public void testRandomUuidForHostnames() throws SQLException {
 
-    MetricUuidGenStrategy strategy = new MD5UuidGenStrategy();
-    Map<String, String> uuids = new HashMap<>();
+  private void testHostCollisionsForUuidGenStrategy(MetricUuidGenStrategy strategy, int uuidLength) {
+    Map<TimelineMetricUuid, String> uuids = new HashMap<>();
 
     List<String> hosts = new ArrayList<>();
     String hostPrefix = "TestHost.";
@@ -138,40 +141,33 @@ public class TimelineMetricUuidManagerTest {
       hosts.add(hostPrefix + i + hostSuffix);
     }
 
-    int numC = 0;
     for (String host : hosts) {
-      byte[] uuid = strategy.computeUuid(host, 16);
+      byte[] uuid = strategy.computeUuid(host, uuidLength);
       Assert.assertNotNull(uuid);
-      Assert.assertTrue(uuid.length == 16);
-      String uuidStr = new String(uuid);
+      Assert.assertTrue(uuid.length == uuidLength);
+      TimelineMetricUuid uuidStr = new TimelineMetricUuid(uuid);
       Assert.assertFalse(uuids.containsKey(uuidStr));
       uuids.put(uuidStr, host);
     }
   }
 
-
-  @Test
-  public void testConsistentHashing() throws SQLException, InterruptedException {
-
-    MetricUuidGenStrategy strategy = new MD5UuidGenStrategy();
+  private void testConsistencyForUuidGenStrategy(MetricUuidGenStrategy strategy, int length) throws InterruptedException {
     String key = "TestString";
 
-    byte[] uuid = strategy.computeUuid(key, 16);
+    byte[] uuid = strategy.computeUuid(key, length);
     Assert.assertNotNull(uuid);
-    Assert.assertTrue(uuid.length == 16);
+    Assert.assertTrue(uuid.length == length);
 
     for (int i = 0; i<100; i++) {
-      byte[] uuid2 = strategy.computeUuid(key, 16);
+      byte[] uuid2 = strategy.computeUuid(key, length);
       Assert.assertNotNull(uuid2);
-      Assert.assertTrue(uuid2.length == 16);
+      Assert.assertTrue(uuid2.length == length);
       Assert.assertArrayEquals(uuid, uuid2);
       Thread.sleep(10);
     }
   }
 
-
-  public Map<String, Set<String>> populateMetricWhitelistFromFile() {
-
+  private static Map<String, Set<String>> populateMetricWhitelistFromFile() {
 
     Map<String, Set<String>> metricSet = new HashMap<String, Set<String>>();
     FileInputStream fstream = null;
@@ -207,7 +203,7 @@ public class TimelineMetricUuidManagerTest {
         }
       }
       metricsForApp.add("live_hosts");
-      if (appId.equals("master_hbase") || appId.equals("slave_hbase")) {
+      if (appId.startsWith("hbase")) {
         hbaseMetrics.addAll(metricsForApp);
       } else {
         metricSet.put(appId, metricsForApp);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog270.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog270.java
index b7a55ae..a111764 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog270.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog270.java
@@ -1601,10 +1601,25 @@ public class UpgradeCatalog270 extends AbstractUpgradeCatalog {
     if (clusters != null) {
       Map<String, Cluster> clusterMap = clusters.getClusters();
 
-      ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
       if (clusterMap != null && !clusterMap.isEmpty()) {
         for (final Cluster cluster : clusterMap.values()) {
-          updateConfigurationPropertiesForCluster(cluster, "ams-site", Collections.singletonMap("timeline.metrics.service.default.result.limit", "5760"), true, true);
+          Map<String, String> newProperties = new HashMap<>();
+          newProperties.put("timeline.metrics.service.default.result.limit", "5760");
+
+          Config config = cluster.getDesiredConfigByType("ams-site");
+          if (config != null) {
+            Map<String, String> oldAmsSite = config.getProperties();
+            if (MapUtils.isNotEmpty(oldAmsSite)) {
+              int oldTtl = Integer.parseInt(oldAmsSite.get("timeline.container-metrics.ttl"));
+              if (oldTtl > 14 * 86400) {
+                newProperties.put("timeline.container-metrics.ttl", "1209600");
+              }
+            }
+          }
+          Set<String> removeProperties = Sets.newHashSet("timeline.metrics.host.aggregate.splitpoints",
+            "timeline.metrics.cluster.aggregate.splitpoints");
+          updateConfigurationPropertiesForCluster(cluster, "ams-site", newProperties, removeProperties, true, true);
+
         }
       }
     }
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
index 0ffbf8a..e7eb3cb 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
@@ -541,60 +541,6 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>timeline.metrics.host.aggregate.splitpoints</name>
-    <value> </value>
-    <description>
-      Pre-split regions using the split points corresponding to this property
-      for the precision table that stores seconds aggregate data.
-    </description>
-    <depends-on>
-      <property>
-        <type>ams-hbase-site</type>
-        <name>hbase.regionserver.global.memstore.upperLimit</name>
-      </property>
-      <property>
-        <type>ams-hbase-site</type>
-        <name>hbase.hregion.memstore.flush.size</name>
-      </property>
-      <property>
-        <type>ams-hbase-env</type>
-        <name>hbase_master_heapsize</name>
-      </property>
-      <property>
-        <type>ams-hbase-env</type>
-        <name>hbase_regionserver_heapsize</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregate.splitpoints</name>
-    <value> </value>
-    <description>
-      Pre-split regions using the split points corresponding to this property
-      for the aggregate table that stores seconds aggregate data across hosts.
-    </description>
-    <depends-on>
-      <property>
-        <type>ams-hbase-site</type>
-        <name>hbase.regionserver.global.memstore.upperLimit</name>
-      </property>
-      <property>
-        <type>ams-hbase-site</type>
-        <name>hbase.hregion.memstore.flush.size</name>
-      </property>
-      <property>
-        <type>ams-hbase-env</type>
-        <name>hbase_master_heapsize</name>
-      </property>
-      <property>
-        <type>ams-hbase-env</type>
-        <name>hbase_regionserver_heapsize</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
     <name>timeline.metrics.sink.report.interval</name>
     <value>60</value>
     <description>
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/service-metrics/KAFKA.txt b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/service-metrics/KAFKA.txt
old mode 100755
new mode 100644
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
index 73341f1..d0ee66e 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
@@ -226,9 +226,35 @@ def ams(name=None, action=None):
               recursive_ownership = True
     )
 
+    new_ams_site = {}
+    new_ams_site.update(params.config['configurations']['ams-site'])
+    if params.clusterHostInfoDict:
+      master_components = []
+      slave_components = []
+      components = dict(params.clusterHostInfoDict).keys()
+      known_slave_components = ["nodemanager", "metrics_monitor", "datanode", "hbase_regionserver"]
+      for component in components:
+        if component and component.endswith("_hosts"):
+          component_name = component[:-6]
+        elif component and component.endswith("_host"):
+          component_name = component[:-5]
+        else:
+          continue
+        if component_name in known_slave_components:
+          slave_components.append(component_name)
+        else:
+          master_components.append(component_name)
+
+      if slave_components:
+        new_ams_site['timeline.metrics.initial.configured.slave.components'] = ",".join(slave_components)
+      if master_components:
+        if 'ambari_server' not in master_components:
+          master_components.append('ambari_server')
+        new_ams_site['timeline.metrics.initial.configured.master.components'] = ",".join(master_components)
+
     XmlConfig("ams-site.xml",
               conf_dir=params.ams_collector_conf_dir,
-              configurations=params.config['configurations']['ams-site'],
+              configurations=new_ams_site,
               configuration_attributes=params.config['configurationAttributes']['ams-site'],
               owner=params.ams_user,
               group=params.user_group
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index de0fcf1..9424752 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -393,6 +393,7 @@ hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 
 
+clusterHostInfoDict = config["clusterHostInfo"]
 
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py
index c78d48a..7deeae6 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py
@@ -33,15 +33,6 @@ SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
 STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
 PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
 
-#split points
-metricsDir = os.path.join(SCRIPT_DIR, 'package')
-print "METRICS_DIR=>" + str(metricsDir)
-serviceMetricsDir = os.path.join(metricsDir, 'files', 'service-metrics')
-customServiceMetricsDir = os.path.join(SCRIPT_DIR, '../../../dashboards/service-metrics')
-sys.path.append(os.path.join(metricsDir, 'scripts'))
-
-from split_points import FindSplitPointsForAMSRegions
-
 try:
   with open(PARENT_FILE, 'rb') as fp:
     service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
@@ -439,19 +430,6 @@ class AMBARI_METRICSRecommender(service_advisor.ServiceAdvisor):
     if not ams_hbase_env:
       ams_hbase_env = configurations["ams-hbase-env"]["properties"]
 
-    split_point_finder = FindSplitPointsForAMSRegions(
-      ams_hbase_site, ams_hbase_env, serviceMetricsDir, customServiceMetricsDir, operatingMode, servicesList)
-
-    result = split_point_finder.get_split_points()
-    precision_splits = ' '
-    aggregate_splits = ' '
-    if result.precision:
-      precision_splits = result.precision
-    if result.aggregate:
-      aggregate_splits = result.aggregate
-    putAmsSiteProperty("timeline.metrics.host.aggregate.splitpoints", ','.join(precision_splits))
-    putAmsSiteProperty("timeline.metrics.cluster.aggregate.splitpoints", ','.join(aggregate_splits))
-
     component_grafana_exists = False
     for service in services['services']:
       if 'components' in service:
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog270Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog270Test.java
index 2268694..ad6d435 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog270Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog270Test.java
@@ -1340,11 +1340,15 @@ public class UpgradeCatalog270Test {
     Map<String, String> oldProperties = new HashMap<String, String>() {
       {
         put("timeline.metrics.service.default.result.limit", "15840");
+        put("timeline.container-metrics.ttl", "2592000");
+        put("timeline.metrics.cluster.aggregate.splitpoints", "cpu_user,mem_free");
+        put("timeline.metrics.host.aggregate.splitpoints", "kafka.metric,nimbus.metric");
       }
     };
     Map<String, String> newProperties = new HashMap<String, String>() {
       {
         put("timeline.metrics.service.default.result.limit", "5760");
+        put("timeline.container-metrics.ttl", "1209600");
       }
     };
 
diff --git a/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py b/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py
index a97866b..ef2ac04 100644
--- a/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py
+++ b/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py
@@ -154,8 +154,6 @@ class TestAMBARI_METRICS010ServiceAdvisor(TestCase):
                                                                                          'hbase.unsafe.stream.capability.enforce': 'true'}},
                   'ams-site': {'properties': {'timeline.metrics.cache.commit.interval': '10',
                                                                              'timeline.metrics.cache.size': '100',
-                                                                             'timeline.metrics.cluster.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
-                                                                             'timeline.metrics.host.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
                                                                              'timeline.metrics.service.handler.thread.count': '20',
                                                                              'timeline.metrics.service.operation.mode': 'distributed',
                                                                              'timeline.metrics.service.watcher.disabled': 'true',
@@ -221,8 +219,6 @@ class TestAMBARI_METRICS010ServiceAdvisor(TestCase):
                                                                                          'hbase.unsafe.stream.capability.enforce': 'true'}},
                 'ams-site': {'properties': {'timeline.metrics.cache.commit.interval': '10',
                                                                              'timeline.metrics.cache.size': '100',
-                                                                             'timeline.metrics.cluster.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
-                                                                             'timeline.metrics.host.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
                                                                              'timeline.metrics.service.handler.thread.count': '20',
                                                                              'timeline.metrics.service.operation.mode': 'distributed',
                                                                              'timeline.metrics.service.watcher.disabled': 'true',
diff --git a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
index 58418f9..fc4d79d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
@@ -147,7 +147,7 @@ class TestMetricsCollector(RMFTestCase):
                               group = 'hadoop',
                               conf_dir = '/etc/ambari-metrics-collector/conf',
                               configurations = self.getConfig()['configurations']['ams-site'],
-                              configuration_attributes = self.getConfig()['configurationAttributes']['ams-hbase-site']
+                              configuration_attributes = self.getConfig()['configurationAttributes']['ams-site']
     )
 
     self.assertResourceCalled('XmlConfig', 'ssl-server.xml',
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 220cc72..69bd8d8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -1076,7 +1076,9 @@
             "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier": "1",
             "timeline.metrics.daily.aggregator.minute.interval": "86400",
             "timeline.metrics.cluster.aggregator.minute.interval": "120",
-            "timeline.metrics.host.aggregator.hourly.interval": "3600"
+            "timeline.metrics.host.aggregator.hourly.interval": "3600",
+            "timeline.metrics.initial.configured.master.components": "drpc_server,hive_server,resourcemanager,all,webhcat_server,snamenode,storm_ui_server,falcon_server,namenode,ganglia_server,logviewer_server,hive_metastore,nimbus,zookeeper_server,historyserver,hbase_master,oozie_server,metrics_collector,supervisor,ganglia_monitor,hive_mysql,ambari_server",
+            "timeline.metrics.initial.configured.slave.components": "hbase_regionserver,datanode,nodemanager"
         },
         "ams-grafana-env": {
             "metrics_grafana_log_dir": "/var/log/ambari-metrics-grafana",
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
index 688c1c7..92cd24a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
@@ -994,7 +994,8 @@
             "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier": "1",
             "timeline.metrics.daily.aggregator.minute.interval": "86400",
             "timeline.metrics.cluster.aggregator.minute.interval": "120",
-            "timeline.metrics.host.aggregator.hourly.interval": "3600"
+            "timeline.metrics.host.aggregator.hourly.interval": "3600",
+            "timeline.metrics.initial.configured.master.components": "snamenode,nm,drpc_server,hive_server,resourcemanager,all,slave,webhcat_server,ganglia_server,storm_ui_server,falcon_server,hs,hive_metastore,logviewer_server,nimbus,zookeeper_server,hbase_rs,namenode,hbase_master,metrics_collector,ambari_server,supervisor,ganglia_monitor,hive_mysql"
         }
     },
     "configurationAttributes": {

-- 
To stop receiving notification emails like this one, please contact
avijayan@apache.org.

[ambari] 02/03: AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work (Pass hbase total heapsize to collector).

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 5bae5c66268591f999e639ca32a50931d803f9f2
Author: Aravindan Vijayan <av...@hortonworks.com>
AuthorDate: Wed May 9 21:12:10 2018 -0700

    AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work (Pass hbase total heapsize to collector).
---
 .../common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py       | 4 ++++
 ambari-server/src/test/python/stacks/2.0.6/configs/default.json       | 3 ++-
 .../src/test/python/stacks/2.0.6/configs/default_ams_embedded.json    | 3 ++-
 3 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
index d0ee66e..fc57ffc 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
@@ -252,6 +252,10 @@ def ams(name=None, action=None):
           master_components.append('ambari_server')
         new_ams_site['timeline.metrics.initial.configured.master.components'] = ",".join(master_components)
 
+    hbase_total_heapsize_with_trailing_m = params.hbase_heapsize
+    hbase_total_heapsize = int(hbase_total_heapsize_with_trailing_m[:-1]) * 1024 * 1024
+    new_ams_site['hbase_total_heapsize'] = hbase_total_heapsize
+
     XmlConfig("ams-site.xml",
               conf_dir=params.ams_collector_conf_dir,
               configurations=new_ams_site,
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 69bd8d8..98fc30d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -1078,7 +1078,8 @@
             "timeline.metrics.cluster.aggregator.minute.interval": "120",
             "timeline.metrics.host.aggregator.hourly.interval": "3600",
             "timeline.metrics.initial.configured.master.components": "drpc_server,hive_server,resourcemanager,all,webhcat_server,snamenode,storm_ui_server,falcon_server,namenode,ganglia_server,logviewer_server,hive_metastore,nimbus,zookeeper_server,historyserver,hbase_master,oozie_server,metrics_collector,supervisor,ganglia_monitor,hive_mysql,ambari_server",
-            "timeline.metrics.initial.configured.slave.components": "hbase_regionserver,datanode,nodemanager"
+            "timeline.metrics.initial.configured.slave.components": "hbase_regionserver,datanode,nodemanager",
+            "hbase_total_heapsize": 536870912
         },
         "ams-grafana-env": {
             "metrics_grafana_log_dir": "/var/log/ambari-metrics-grafana",
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
index 92cd24a..a802b74 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
@@ -995,7 +995,8 @@
             "timeline.metrics.daily.aggregator.minute.interval": "86400",
             "timeline.metrics.cluster.aggregator.minute.interval": "120",
             "timeline.metrics.host.aggregator.hourly.interval": "3600",
-            "timeline.metrics.initial.configured.master.components": "snamenode,nm,drpc_server,hive_server,resourcemanager,all,slave,webhcat_server,ganglia_server,storm_ui_server,falcon_server,hs,hive_metastore,logviewer_server,nimbus,zookeeper_server,hbase_rs,namenode,hbase_master,metrics_collector,ambari_server,supervisor,ganglia_monitor,hive_mysql"
+            "timeline.metrics.initial.configured.master.components": "snamenode,nm,drpc_server,hive_server,resourcemanager,all,slave,webhcat_server,ganglia_server,storm_ui_server,falcon_server,hs,hive_metastore,logviewer_server,nimbus,zookeeper_server,hbase_rs,namenode,hbase_master,metrics_collector,ambari_server,supervisor,ganglia_monitor,hive_mysql",
+            "hbase_total_heapsize": 1073741824
         }
     },
     "configurationAttributes": {

-- 
To stop receiving notification emails like this one, please contact
avijayan@apache.org.

[ambari] 03/03: AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work (Refactor Split point computation. Allow only Murmur3Hash).

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit d4d2767ae0d9c8e3043144df52bd5d6094b61171
Author: Aravindan Vijayan <av...@hortonworks.com>
AuthorDate: Thu May 10 11:46:21 2018 -0700

    AMBARI-23804 : Refine AMS HBase region splitting calculation based on UUID work (Refactor Split point computation. Allow only Murmur3Hash).
---
 .../metrics/core/timeline/PhoenixHBaseAccessor.java |  1 +
 .../timeline/TimelineMetricSplitPointComputer.java  | 19 ++++++++++---------
 .../discovery/TimelineMetricMetadataManager.java    | 20 +-------------------
 .../timeline/uuid/Murmur3HashUuidGenStrategy.java   | 13 ++++++++++++-
 .../TimelineMetricSplitPointComputerTest.java       | 21 ++++++++++++---------
 5 files changed, 36 insertions(+), 38 deletions(-)

diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java
index dec7850..c684b0a 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java
@@ -492,6 +492,7 @@ public class PhoenixHBaseAccessor {
     PreparedStatement pStmt = null;
     TimelineMetricSplitPointComputer splitPointComputer = new TimelineMetricSplitPointComputer(
       metricsConf, hbaseConf, metadataManagerInstance);
+    splitPointComputer.computeSplitPoints();
 
     String encoding = metricsConf.get(HBASE_ENCODING_SCHEME, DEFAULT_ENCODING);
     String compression = metricsConf.get(HBASE_COMPRESSION_SCHEME, DEFAULT_TABLE_COMPRESSION);
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java
index 89bb843..05366cc 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputer.java
@@ -49,6 +49,11 @@ public class TimelineMetricSplitPointComputer {
   private static final int SLAVE_EQUIDISTANT_POINTS = 50;
   private static final int MASTER_EQUIDISTANT_POINTS = 5;
 
+  private double hbaseTotalHeapsize;
+  private double hbaseMemstoreUpperLimit;
+  private double hbaseMemstoreFlushSize;
+  private TimelineMetricMetadataManager timelineMetricMetadataManager = null;
+
   private List<byte[]> precisionSplitPoints = new ArrayList<>();
   private List<byte[]> aggregateSplitPoints = new ArrayList<>();
 
@@ -66,18 +71,14 @@ public class TimelineMetricSplitPointComputer {
       slaveComponents.addAll(Arrays.asList(componentsString.split(",")));
     }
 
-    double hbaseTotalHeapsize = metricsConf.getDouble("hbase_total_heapsize", 1024*1024*1024);
-    double hbaseMemstoreUpperLimit = hbaseConf.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5);
-    double hbaseMemstoreFlushSize = hbaseConf.getDouble("hbase.hregion.memstore.flush.size", 134217728);
-
-    computeSplitPoints(hbaseTotalHeapsize, hbaseMemstoreUpperLimit, hbaseMemstoreFlushSize, timelineMetricMetadataManager);
+    this.timelineMetricMetadataManager = timelineMetricMetadataManager;
+    hbaseTotalHeapsize = metricsConf.getDouble("hbase_total_heapsize", 1024*1024*1024);
+    hbaseMemstoreUpperLimit = hbaseConf.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.3);
+    hbaseMemstoreFlushSize = hbaseConf.getDouble("hbase.hregion.memstore.flush.size", 134217728);
   }
 
 
-  private void computeSplitPoints(double hbaseTotalHeapsize,
-                                  double hbaseMemstoreUpperLimit,
-                                  double hbaseMemstoreFlushSize,
-                                  TimelineMetricMetadataManager timelineMetricMetadataManager) {
+  protected void computeSplitPoints() {
 
     double memstoreMaxMemory = hbaseMemstoreUpperLimit * hbaseTotalHeapsize;
     int maxInMemoryRegions = (int) ((memstoreMaxMemory / hbaseMemstoreFlushSize) - OTHER_TABLE_STATIC_REGIONS);
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
index 737c2ff..86226ec 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
@@ -397,18 +397,6 @@ public class TimelineMetricMetadataManager {
       }
     }
 
-    if (!HOSTED_APPS_MAP.isEmpty()) {
-      Map.Entry<String, TimelineMetricHostMetadata> entry = HOSTED_APPS_MAP.entrySet().iterator().next();
-      TimelineMetricHostMetadata timelineMetricHostMetadata = entry.getValue();
-      if (timelineMetricHostMetadata.getUuid() != null  && timelineMetricHostMetadata.getUuid().length == 16) {
-        HOSTNAME_UUID_LENGTH = 16;
-        uuidGenStrategy = new MD5UuidGenStrategy();
-      } else {
-        HOSTNAME_UUID_LENGTH = 4;
-        uuidGenStrategy = new Murmur3HashUuidGenStrategy();
-      }
-    }
-
     for (String host : HOSTED_APPS_MAP.keySet()) {
       TimelineMetricHostMetadata timelineMetricHostMetadata = HOSTED_APPS_MAP.get(host);
       if (timelineMetricHostMetadata != null && timelineMetricHostMetadata.getUuid() != null) {
@@ -423,13 +411,7 @@ public class TimelineMetricMetadataManager {
    * @return the UUID generator of type org.apache.ambari.metrics.core.timeline.uuid.MetricUuidGenStrategy
    */
   private MetricUuidGenStrategy getUuidStrategy(Configuration configuration) {
-    String strategy = configuration.get(TIMELINE_METRICS_UUID_GEN_STRATEGY, "");
-    if ("md5".equalsIgnoreCase(strategy)){
-      return new MD5UuidGenStrategy();
-    } else {
-      //Default
-      return new Murmur3HashUuidGenStrategy();
-    }
+    return new Murmur3HashUuidGenStrategy();
   }
 
   /**
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java
index 9418aa4..af8cee5 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/uuid/Murmur3HashUuidGenStrategy.java
@@ -24,7 +24,13 @@ import org.apache.commons.lang.StringUtils;
 
 public class Murmur3HashUuidGenStrategy implements MetricUuidGenStrategy{
 
-  @Override
+  /**
+   * Compute Murmur3Hash 16 byte UUID for a Metric-App-Instance.
+   * @param timelineClusterMetric input metric
+   * @param maxLength Max length of returned UUID. (Will always be 16 for this technique)
+   * @return 16 byte UUID.
+   */
+  @Override
   public byte[] computeUuid(TimelineClusterMetric timelineClusterMetric, int maxLength) {
 
     String metricString = timelineClusterMetric.getMetricName() + timelineClusterMetric.getAppId();
@@ -35,6 +41,12 @@
     return Hashing.murmur3_128().hashBytes(metricBytes).asBytes();
   }
 
+  /**
+   * Compute Murmur3Hash 4 byte UUID for a String.
+   * @param value String input
+   * @param maxLength Max length of returned UUID. (Will always be 4 for this technique)
+   * @return 4 byte UUID.
+   */
   @Override
   public byte[] computeUuid(String value, int maxLength) {
     byte[] valueBytes = value.getBytes();
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java
index 4d663cc..150dac2 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/ambari/metrics/core/timeline/TimelineMetricSplitPointComputerTest.java
@@ -51,7 +51,7 @@ public class TimelineMetricSplitPointComputerTest {
     expect(metricsConfMock.getDouble("hbase_total_heapsize", 1024*1024*1024)).andReturn(1024 * 1024 * 1024.0).once();
 
     Configuration hbaseConfMock = EasyMock.createMock(Configuration.class);
-    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5)).andReturn(0.5).once();
+    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.3)).andReturn(0.3).once();
     expect(hbaseConfMock.getDouble("hbase.hregion.memstore.flush.size", 134217728)).andReturn(134217728.0).once();
 
     TimelineMetricMetadataManager metricMetadataManagerMock = EasyMock.createNiceMock(TimelineMetricMetadataManager.class);
@@ -62,6 +62,7 @@ public class TimelineMetricSplitPointComputerTest {
     TimelineMetricSplitPointComputer timelineMetricSplitPointComputer = new TimelineMetricSplitPointComputer(metricsConfMock,
       hbaseConfMock,
       metricMetadataManagerMock);
+    timelineMetricSplitPointComputer.computeSplitPoints();
 
     Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 3);
     Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 1);
@@ -85,7 +86,7 @@ public class TimelineMetricSplitPointComputerTest {
     expect(metricsConfMock.getDouble("hbase_total_heapsize", 1024*1024*1024)).andReturn(8589934592.0).once();
 
     Configuration hbaseConfMock = EasyMock.createMock(Configuration.class);
-    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5)).andReturn(0.5).once();
+    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.3)).andReturn(0.3).once();
     expect(hbaseConfMock.getDouble("hbase.hregion.memstore.flush.size", 134217728)).andReturn(134217728.0).once();
 
     TimelineMetricMetadataManager metricMetadataManagerMock = EasyMock.createNiceMock(TimelineMetricMetadataManager.class);
@@ -96,10 +97,11 @@ public class TimelineMetricSplitPointComputerTest {
     TimelineMetricSplitPointComputer timelineMetricSplitPointComputer = new TimelineMetricSplitPointComputer(metricsConfMock,
       hbaseConfMock,
       metricMetadataManagerMock);
+    timelineMetricSplitPointComputer.computeSplitPoints();
 
-    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 16);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 3);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 3);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 6);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 1);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 1);
   }
 
   @Test
@@ -119,7 +121,7 @@ public class TimelineMetricSplitPointComputerTest {
     expect(metricsConfMock.getDouble("hbase_total_heapsize", 1024*1024*1024)).andReturn(24 * 1024 * 1024 * 1024.0).once();
 
     Configuration hbaseConfMock = EasyMock.createMock(Configuration.class);
-    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.5)).andReturn(0.5).once();
+    expect(hbaseConfMock.getDouble("hbase.regionserver.global.memstore.upperLimit", 0.3)).andReturn(0.3).once();
     expect(hbaseConfMock.getDouble("hbase.hregion.memstore.flush.size", 134217728)).andReturn(2 * 134217728.0).once();
 
     TimelineMetricMetadataManager metricMetadataManagerMock = EasyMock.createNiceMock(TimelineMetricMetadataManager.class);
@@ -130,9 +132,10 @@ public class TimelineMetricSplitPointComputerTest {
     TimelineMetricSplitPointComputer timelineMetricSplitPointComputer = new TimelineMetricSplitPointComputer(metricsConfMock,
       hbaseConfMock,
       metricMetadataManagerMock);
+    timelineMetricSplitPointComputer.computeSplitPoints();
 
-    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 28);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 6);
-    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 6);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getPrecisionSplitPoints().size(), 14);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getClusterAggregateSplitPoints().size(), 3);
+    Assert.assertEquals(timelineMetricSplitPointComputer.getHostAggregateSplitPoints().size(), 3);
   }
 }

-- 
To stop receiving notification emails like this one, please contact
avijayan@apache.org.