Posted to commits@ambari.apache.org by sw...@apache.org on 2014/12/02 18:28:31 UTC

[15/30] ambari git commit: AMBARI-5707. Replace Ganglia with a high-performance and pluggable Metrics System. (swagle)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregator.java
new file mode 100644
index 0000000..654c188
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregator.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+
+import org.apache.commons.io.FilenameUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import java.io.IOException;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.GET_METRIC_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.NATIVE_TIME_RANGE_DELTA;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_MINUTE_DISABLED;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_MINUTE_SLEEP_INTERVAL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_TIMESLICE_INTERVAL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.DEFAULT_CHECKPOINT_LOCATION;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR;
+
+/**
+ * Aggregates a metric across all hosts in the cluster. Reads metrics from
+ * the precision table and saves them into the cluster aggregate table.
+ */
+public class TimelineMetricClusterAggregator extends AbstractTimelineAggregator {
+  private static final Log LOG = LogFactory.getLog(TimelineMetricClusterAggregator.class);
+  private static final String CLUSTER_AGGREGATOR_CHECKPOINT_FILE =
+    "timeline-metrics-cluster-aggregator-checkpoint";
+  private final String checkpointLocation;
+  private final Long sleepIntervalMillis;
+  public final int timeSliceIntervalMillis;
+  private final Integer checkpointCutOffMultiplier;
+
+  public TimelineMetricClusterAggregator(PhoenixHBaseAccessor hBaseAccessor,
+                                         Configuration metricsConf) {
+    super(hBaseAccessor, metricsConf);
+
+    String checkpointDir = metricsConf.get(
+      TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR, DEFAULT_CHECKPOINT_LOCATION);
+
+    checkpointLocation = FilenameUtils.concat(checkpointDir,
+      CLUSTER_AGGREGATOR_CHECKPOINT_FILE);
+
+    sleepIntervalMillis = SECONDS.toMillis(metricsConf.getLong
+      (CLUSTER_AGGREGATOR_MINUTE_SLEEP_INTERVAL, 120l));
+    timeSliceIntervalMillis = (int)SECONDS.toMillis(metricsConf.getInt
+      (CLUSTER_AGGREGATOR_TIMESLICE_INTERVAL, 15));
+    checkpointCutOffMultiplier =
+      metricsConf.getInt(CLUSTER_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER, 2);
+  }
+
+  @Override
+  protected String getCheckpointLocation() {
+    return checkpointLocation;
+  }
+
+  @Override
+  protected void aggregate(ResultSet rs, long startTime, long endTime)
+    throws SQLException, IOException {
+    List<Long[]> timeSlices = getTimeSlices(startTime, endTime);
+    Map<TimelineClusterMetric, MetricClusterAggregate>
+      aggregateClusterMetrics = aggregateMetricsFromResultSet(rs, timeSlices);
+
+    LOG.info("Saving " + aggregateClusterMetrics.size() + " metric aggregates.");
+    hBaseAccessor.saveClusterAggregateRecords(aggregateClusterMetrics);
+  }
+
+  @Override
+  protected Condition prepareMetricQueryCondition(long startTime, long endTime) {
+    Condition condition = new Condition(null, null, null, null, startTime,
+      endTime, null, true);
+    condition.setNoLimit();
+    condition.setFetchSize(resultsetFetchSize);
+    condition.setStatement(String.format(GET_METRIC_SQL,
+      PhoenixTransactSQL.getNaiveTimeRangeHint(startTime, NATIVE_TIME_RANGE_DELTA),
+      METRICS_RECORD_TABLE_NAME));
+    condition.addOrderByColumn("METRIC_NAME");
+    condition.addOrderByColumn("APP_ID");
+    condition.addOrderByColumn("INSTANCE_ID");
+    condition.addOrderByColumn("SERVER_TIME");
+    return condition;
+  }
+
+  private List<Long[]> getTimeSlices(long startTime, long endTime) {
+    List<Long[]> timeSlices = new ArrayList<Long[]>();
+    long sliceStartTime = startTime;
+    while (sliceStartTime < endTime) {
+      timeSlices.add(new Long[] { sliceStartTime, sliceStartTime + timeSliceIntervalMillis});
+      sliceStartTime += timeSliceIntervalMillis;
+    }
+    return timeSlices;
+  }
+
+  private Map<TimelineClusterMetric, MetricClusterAggregate>
+  aggregateMetricsFromResultSet(ResultSet rs, List<Long[]> timeSlices)
+    throws SQLException, IOException {
+    Map<TimelineClusterMetric, MetricClusterAggregate> aggregateClusterMetrics =
+      new HashMap<TimelineClusterMetric, MetricClusterAggregate>();
+    // Fold each metric's datapoints into the supplied time slices
+
+    while (rs.next()) {
+      TimelineMetric metric =
+        PhoenixHBaseAccessor.getTimelineMetricFromResultSet(rs);
+
+      Map<TimelineClusterMetric, Double> clusterMetrics =
+        sliceFromTimelineMetric(metric, timeSlices);
+
+      if (clusterMetrics != null && !clusterMetrics.isEmpty()) {
+        for (Map.Entry<TimelineClusterMetric, Double> clusterMetricEntry :
+            clusterMetrics.entrySet()) {
+          TimelineClusterMetric clusterMetric = clusterMetricEntry.getKey();
+          MetricClusterAggregate aggregate = aggregateClusterMetrics.get(clusterMetric);
+          Double avgValue = clusterMetricEntry.getValue();
+
+          if (aggregate == null) {
+            aggregate = new MetricClusterAggregate(avgValue, 1, null,
+              avgValue, avgValue);
+            aggregateClusterMetrics.put(clusterMetric, aggregate);
+          } else {
+            aggregate.updateSum(avgValue);
+            aggregate.updateNumberOfHosts(1);
+            aggregate.updateMax(avgValue);
+            aggregate.updateMin(avgValue);
+          }
+        }
+      }
+    }
+    return aggregateClusterMetrics;
+  }
+
+  @Override
+  protected Long getSleepIntervalMillis() {
+    return sleepIntervalMillis;
+  }
+
+  @Override
+  protected Integer getCheckpointCutOffMultiplier() {
+    return checkpointCutOffMultiplier;
+  }
+
+  @Override
+  protected boolean isDisabled() {
+    return metricsConf.getBoolean(CLUSTER_AGGREGATOR_MINUTE_DISABLED, false);
+  }
+
+  private Map<TimelineClusterMetric, Double> sliceFromTimelineMetric(
+        TimelineMetric timelineMetric, List<Long[]> timeSlices) {
+
+    if (timelineMetric.getMetricValues().isEmpty()) {
+      return null;
+    }
+
+    Map<TimelineClusterMetric, Double> timelineClusterMetricMap =
+      new HashMap<TimelineClusterMetric, Double>();
+
+    for (Map.Entry<Long, Double> metric : timelineMetric.getMetricValues().entrySet()) {
+      // TODO: investigate null values - pre filter
+      if (metric.getValue() == null) {
+        continue;
+      }
+      Long timestamp = getSliceTimeForMetric(timeSlices,
+                       Long.parseLong(metric.getKey().toString()));
+      if (timestamp != -1) {
+        // Metric is within desired time range
+        TimelineClusterMetric clusterMetric = new TimelineClusterMetric(
+          timelineMetric.getMetricName(),
+          timelineMetric.getAppId(),
+          timelineMetric.getInstanceId(),
+          timestamp,
+          timelineMetric.getType());
+        if (!timelineClusterMetricMap.containsKey(clusterMetric)) {
+          timelineClusterMetricMap.put(clusterMetric, metric.getValue());
+        } else {
+          Double oldValue = timelineClusterMetricMap.get(clusterMetric);
+          Double newValue = (oldValue + metric.getValue()) / 2;
+          timelineClusterMetricMap.put(clusterMetric, newValue);
+        }
+      }
+    }
+
+    return timelineClusterMetricMap;
+  }
+
+  /**
+   * Return the beginning of the time slice into which the metric timestamp
+   * falls, or -1 if it is outside all slices.
+   */
+  private Long getSliceTimeForMetric(List<Long[]> timeSlices, Long timestamp) {
+    for (Long[] timeSlice : timeSlices) {
+      if (timestamp >= timeSlice[0] && timestamp < timeSlice[1]) {
+        return timeSlice[0];
+      }
+    }
+    return -1l;
+  }
+
+}
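
For reference, the slicing and running-average logic above can be exercised in isolation. Below is a minimal, self-contained sketch (the class name, timestamps and values are illustrative, not part of this patch) mirroring getTimeSlices(), getSliceTimeForMetric() and the pairwise averaging in sliceFromTimelineMetric():

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TimeSliceDemo {

  // Mirrors getTimeSlices(): half-open windows [start, start + interval).
  static List<Long[]> getTimeSlices(long startTime, long endTime, long intervalMillis) {
    List<Long[]> slices = new ArrayList<Long[]>();
    for (long t = startTime; t < endTime; t += intervalMillis) {
      slices.add(new Long[] { t, t + intervalMillis });
    }
    return slices;
  }

  // Mirrors getSliceTimeForMetric(): the slice start, or -1 if outside all slices.
  static long sliceFor(List<Long[]> slices, long timestamp) {
    for (Long[] slice : slices) {
      if (timestamp >= slice[0] && timestamp < slice[1]) {
        return slice[0];
      }
    }
    return -1L;
  }

  public static void main(String[] args) {
    List<Long[]> slices = getTimeSlices(0L, 60000L, 15000L); // 15s slices over 1 minute

    Map<Long, Double> metricValues = new HashMap<Long, Double>();
    metricValues.put(1000L, 10.0);   // lands in slice [0, 15000)
    metricValues.put(14000L, 20.0);  // same slice -> pairwise average with 10.0
    metricValues.put(31000L, 5.0);   // slice [30000, 45000)

    // Mirrors the per-slice collapse in sliceFromTimelineMetric().
    Map<Long, Double> perSlice = new HashMap<Long, Double>();
    for (Map.Entry<Long, Double> point : metricValues.entrySet()) {
      long sliceStart = sliceFor(slices, point.getKey());
      if (sliceStart == -1L) {
        continue;
      }
      Double oldValue = perSlice.get(sliceStart);
      perSlice.put(sliceStart,
        oldValue == null ? point.getValue() : (oldValue + point.getValue()) / 2);
    }
    System.out.println(perSlice); // e.g. {0=15.0, 30000=5.0}
  }
}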

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregatorHourly.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregatorHourly.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregatorHourly.java
new file mode 100644
index 0000000..7764ea3
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregatorHourly.java
@@ -0,0 +1,173 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
+  .timeline;
+
+import org.apache.commons.io.FilenameUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor.getMetricClusterAggregateFromResultSet;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor.getTimelineMetricClusterKeyFromResultSet;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.GET_CLUSTER_AGGREGATE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_INTERVAL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_HOUR_DISABLED;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.CLUSTER_AGGREGATOR_HOUR_SLEEP_INTERVAL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.DEFAULT_CHECKPOINT_LOCATION;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR;
+
+public class TimelineMetricClusterAggregatorHourly extends
+  AbstractTimelineAggregator {
+  private static final Log LOG = LogFactory.getLog
+    (TimelineMetricClusterAggregatorHourly.class);
+  private static final String CLUSTER_AGGREGATOR_HOURLY_CHECKPOINT_FILE =
+    "timeline-metrics-cluster-aggregator-hourly-checkpoint";
+  private final String checkpointLocation;
+  private final long sleepIntervalMillis;
+  private final Integer checkpointCutOffMultiplier;
+  private long checkpointCutOffIntervalMillis;
+  private static final Long NATIVE_TIME_RANGE_DELTA = 3600000l; // 1 hour
+
+  public TimelineMetricClusterAggregatorHourly(
+    PhoenixHBaseAccessor hBaseAccessor, Configuration metricsConf) {
+    super(hBaseAccessor, metricsConf);
+
+    String checkpointDir = metricsConf.get(
+      TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR, DEFAULT_CHECKPOINT_LOCATION);
+
+    checkpointLocation = FilenameUtils.concat(checkpointDir,
+      CLUSTER_AGGREGATOR_HOURLY_CHECKPOINT_FILE);
+
+    sleepIntervalMillis = SECONDS.toMillis(metricsConf.getLong
+      (CLUSTER_AGGREGATOR_HOUR_SLEEP_INTERVAL, 3600l));
+    checkpointCutOffIntervalMillis =  SECONDS.toMillis(metricsConf.getLong
+      (CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_INTERVAL, 7200l));
+    checkpointCutOffMultiplier = metricsConf.getInt
+      (CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER, 2);
+  }
+
+  @Override
+  protected String getCheckpointLocation() {
+    return checkpointLocation;
+  }
+
+  @Override
+  protected void aggregate(ResultSet rs, long startTime, long endTime)
+    throws SQLException, IOException {
+    Map<TimelineClusterMetric, MetricHostAggregate> hostAggregateMap =
+      aggregateMetricsFromResultSet(rs);
+
+    LOG.info("Saving " + hostAggregateMap.size() + " metric aggregates.");
+    hBaseAccessor.saveClusterAggregateHourlyRecords(hostAggregateMap,
+      METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME);
+  }
+
+  @Override
+  protected Condition prepareMetricQueryCondition(long startTime,
+                                                  long endTime) {
+    Condition condition = new Condition(null, null, null, null, startTime,
+      endTime, null, true);
+    condition.setNoLimit();
+    condition.setFetchSize(resultsetFetchSize);
+    condition.setStatement(String.format(GET_CLUSTER_AGGREGATE_SQL,
+      PhoenixTransactSQL.getNaiveTimeRangeHint(startTime, NATIVE_TIME_RANGE_DELTA)));
+    condition.addOrderByColumn("METRIC_NAME");
+    condition.addOrderByColumn("APP_ID");
+    condition.addOrderByColumn("INSTANCE_ID");
+    condition.addOrderByColumn("SERVER_TIME");
+    return condition;
+  }
+
+  private Map<TimelineClusterMetric, MetricHostAggregate>
+  aggregateMetricsFromResultSet(ResultSet rs) throws IOException, SQLException {
+
+    TimelineClusterMetric existingMetric = null;
+    MetricHostAggregate hostAggregate = null;
+    Map<TimelineClusterMetric, MetricHostAggregate> hostAggregateMap =
+      new HashMap<TimelineClusterMetric, MetricHostAggregate>();
+
+    while (rs.next()) {
+      TimelineClusterMetric currentMetric =
+        getTimelineMetricClusterKeyFromResultSet(rs);
+      MetricClusterAggregate currentHostAggregate =
+        getMetricClusterAggregateFromResultSet(rs);
+
+      if (existingMetric == null) {
+        // First row
+        existingMetric = currentMetric;
+        hostAggregate = new MetricHostAggregate();
+        hostAggregateMap.put(currentMetric, hostAggregate);
+      }
+
+      if (existingMetric.equalsExceptTime(currentMetric)) {
+        // Recalculate totals with current metric
+        updateAggregatesFromHost(hostAggregate, currentHostAggregate);
+
+      } else {
+        // Switched over to a new metric - save existing
+        hostAggregate = new MetricHostAggregate();
+        updateAggregatesFromHost(hostAggregate, currentHostAggregate);
+        hostAggregateMap.put(currentMetric, hostAggregate);
+        existingMetric = currentMetric;
+      }
+
+    }
+
+    return hostAggregateMap;
+  }
+
+  private void updateAggregatesFromHost(
+    MetricHostAggregate agg,
+    MetricClusterAggregate currentClusterAggregate) {
+    agg.updateMax(currentClusterAggregate.getMax());
+    agg.updateMin(currentClusterAggregate.getMin());
+    agg.updateSum(currentClusterAggregate.getSum());
+    agg.updateNumberOfSamples(currentClusterAggregate.getNumberOfHosts());
+  }
+
+  @Override
+  protected Long getSleepIntervalMillis() {
+    return sleepIntervalMillis;
+  }
+
+  @Override
+  protected Integer getCheckpointCutOffMultiplier() {
+    return checkpointCutOffMultiplier;
+  }
+
+  @Override
+  protected Long getCheckpointCutOffIntervalMillis() {
+    return checkpointCutOffIntervalMillis;
+  }
+
+  @Override
+  protected boolean isDisabled() {
+    return metricsConf.getBoolean(CLUSTER_AGGREGATOR_HOUR_DISABLED, false);
+  }
+
+
+}
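
The hourly roll-up above relies on the ORDER BY columns to deliver rows grouped by metric key, then folds each group into one MetricHostAggregate via updateAggregatesFromHost(). A simplified, self-contained sketch of that fold (Row and Aggregate are illustrative stand-ins for the commit's classes, and it keys a map by the non-time part of the key instead of tracking the previous row):

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class HourlyRollupDemo {

  // Stand-in for one minute-level cluster aggregate row.
  static class Row {
    final String key;  // metricName|appId|instanceId
    final double sum, max, min;
    final int hosts;
    Row(String key, double sum, double max, double min, int hosts) {
      this.key = key; this.sum = sum; this.max = max; this.min = min; this.hosts = hosts;
    }
  }

  // Stand-in for MetricHostAggregate: accumulates min/max/sum and sample count.
  static class Aggregate {
    double sum = 0, max = -Double.MAX_VALUE, min = Double.MAX_VALUE;
    long samples = 0;
    void update(Row r) {          // same roll-up as updateAggregatesFromHost()
      sum += r.sum;
      max = Math.max(max, r.max);
      min = Math.min(min, r.min);
      samples += r.hosts;
    }
    public String toString() {
      return "sum=" + sum + " max=" + max + " min=" + min + " samples=" + samples;
    }
  }

  public static void main(String[] args) {
    List<Row> sortedRows = new ArrayList<Row>();   // rows arrive sorted by key
    sortedRows.add(new Row("cpu_user|app1|i1", 30.0, 20.0, 10.0, 2));
    sortedRows.add(new Row("cpu_user|app1|i1", 12.0, 12.0, 12.0, 1));
    sortedRows.add(new Row("mem_free|app1|i1", 512.0, 512.0, 512.0, 1));

    Map<String, Aggregate> hourly = new LinkedHashMap<String, Aggregate>();
    for (Row row : sortedRows) {
      Aggregate agg = hourly.get(row.key);
      if (agg == null) {
        agg = new Aggregate();
        hourly.put(row.key, agg);
      }
      agg.update(row);
    }
    System.out.println(hourly); // cpu_user rolled up across the two minute rows
  }
}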

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
new file mode 100644
index 0000000..60833d0
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Configuration constants for properties defined in ams-site.xml. All time
+ * and interval values are given in seconds.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface TimelineMetricConfiguration {
+  public static final String HBASE_SITE_CONFIGURATION_FILE = "hbase-site.xml";
+  public static final String METRICS_SITE_CONFIGURATION_FILE = "ams-site.xml";
+
+  public static final String TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR =
+    "timeline.metrics.aggregator.checkpoint.dir";
+
+  public static final String DEFAULT_CHECKPOINT_LOCATION =
+    System.getProperty("java.io.tmpdir");
+
+  public static final String HBASE_ENCODING_SCHEME =
+    "timeline.metrics.hbase.data.block.encoding";
+
+  public static final String HBASE_COMPRESSION_SCHEME =
+    "timeline.metrics.hbase.compression.scheme";
+
+  public static final String PRECISION_TABLE_TTL =
+    "timeline.metrics.host.aggregator.ttl";
+  public static final String HOST_MINUTE_TABLE_TTL =
+    "timeline.metrics.host.aggregator.minute.ttl";
+  public static final String HOST_HOUR_TABLE_TTL =
+    "timeline.metrics.host.aggregator.hourly.ttl";
+  public static final String CLUSTER_MINUTE_TABLE_TTL =
+    "timeline.metrics.cluster.aggregator.minute.ttl";
+  public static final String CLUSTER_HOUR_TABLE_TTL =
+    "timeline.metrics.cluster.aggregator.hourly.ttl";
+
+  public static final String CLUSTER_AGGREGATOR_TIMESLICE_INTERVAL =
+    "timeline.metrics.cluster.aggregator.minute.timeslice.interval";
+
+  public static final String AGGREGATOR_CHECKPOINT_DELAY =
+    "timeline.metrics.service.checkpointDelay";
+
+  public static final String RESULTSET_FETCH_SIZE =
+    "timeline.metrics.service.resultset.fetchSize";
+
+  public static final String HOST_AGGREGATOR_MINUTE_SLEEP_INTERVAL =
+    "timeline.metrics.host.aggregator.minute.interval";
+
+  public static final String HOST_AGGREGATOR_HOUR_SLEEP_INTERVAL =
+    "timeline.metrics.host.aggregator.hourly.interval";
+
+  public static final String CLUSTER_AGGREGATOR_MINUTE_SLEEP_INTERVAL =
+    "timeline.metrics.cluster.aggregator.minute.interval";
+
+  public static final String CLUSTER_AGGREGATOR_HOUR_SLEEP_INTERVAL =
+    "timeline.metrics.cluster.aggregator.hourly.interval";
+
+  public static final String HOST_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER =
+    "timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier";
+
+  public static final String HOST_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER =
+    "timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier";
+
+  public static final String CLUSTER_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER =
+    "timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier";
+
+  public static final String CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER =
+    "timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier";
+
+  public static final String CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_INTERVAL =
+    "timeline.metrics.cluster.aggregator.hourly.checkpointCutOffInterval";
+
+  public static final String GLOBAL_RESULT_LIMIT =
+    "timeline.metrics.service.default.result.limit";
+
+  public static final String GLOBAL_MAX_RETRIES =
+    "timeline.metrics.service.default.max_retries";
+
+  public static final String GLOBAL_RETRY_INTERVAL =
+    "timeline.metrics.service.default.retryInterval";
+
+  public static final String HOST_AGGREGATOR_MINUTE_DISABLED =
+    "timeline.metrics.host.aggregator.minute.disabled";
+
+  public static final String HOST_AGGREGATOR_HOUR_DISABLED =
+    "timeline.metrics.host.aggregator.hourly.disabled";
+
+  public static final String CLUSTER_AGGREGATOR_MINUTE_DISABLED =
+    "timeline.metrics.cluster.aggregator.minute.disabled";
+
+  public static final String CLUSTER_AGGREGATOR_HOUR_DISABLED =
+    "timeline.metrics.cluster.aggregator.hourly.disabled";
+
+  public static final String DISABLE_APPLICATION_TIMELINE_STORE =
+    "timeline.service.disable.application.timeline.store";
+}
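
The aggregators consume these keys through Hadoop's Configuration API, reading seconds and converting to milliseconds, as in the constructors earlier in this patch. A minimal sketch (loading ams-site.xml from the classpath and the printed values are illustrative assumptions):

import org.apache.hadoop.conf.Configuration;

import static java.util.concurrent.TimeUnit.SECONDS;

public class AmsConfigSketch {
  public static void main(String[] args) {
    Configuration metricsConf = new Configuration(false);
    metricsConf.addResource("ams-site.xml"); // all values are given in seconds

    long clusterMinuteSleepMillis = SECONDS.toMillis(
      metricsConf.getLong("timeline.metrics.cluster.aggregator.minute.interval", 120L));
    int timeSliceIntervalMillis = (int) SECONDS.toMillis(
      metricsConf.getInt("timeline.metrics.cluster.aggregator.minute.timeslice.interval", 15));
    boolean minuteAggregatorDisabled =
      metricsConf.getBoolean("timeline.metrics.cluster.aggregator.minute.disabled", false);

    System.out.println(clusterMinuteSleepMillis + "ms sleep, "
      + timeSliceIntervalMillis + "ms slices, disabled=" + minuteAggregatorDisabled);
  }
}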

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
new file mode 100644
index 0000000..5224450
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.List;
+
+public interface TimelineMetricStore {
+  /**
+   * This method retrieves metrics stored by the Timeline store.
+   *
+   * @param metricNames Names of the metrics, e.g.: cpu_user
+   * @param hostname Name of the host where the metric originated from
+   * @param applicationId Id of the application to which this metric belongs
+   * @param instanceId Application instance id.
+   * @param startTime Start timestamp
+   * @param endTime End timestamp
+   * @param limit Override default result limit
+   * @param groupedByHosts Group {@link TimelineMetric} by metric name, hostname,
+   *                app id and instance id
+   *
+   * @return {@link TimelineMetric}
+   * @throws java.sql.SQLException
+   */
+  TimelineMetrics getTimelineMetrics(List<String> metricNames, String hostname,
+      String applicationId, String instanceId, Long startTime,
+      Long endTime, Integer limit, boolean groupedByHosts)
+    throws SQLException, IOException;
+
+
+  /**
+   * Return all records for a single metric satisfying the filter criteria.
+   * @return {@link TimelineMetric}
+   */
+  TimelineMetric getTimelineMetric(String metricName, String hostname,
+      String applicationId, String instanceId, Long startTime,
+      Long endTime, Integer limit)
+      throws SQLException, IOException;
+
+
+  /**
+   * Stores metric information to the timeline store. Any errors occurring for
+   * individual put request objects will be reported in the response.
+   *
+   * @param metrics A {@link TimelineMetrics} container.
+   * @return An {@link org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse}.
+   * @throws SQLException
+   * @throws IOException
+   */
+  TimelinePutResponse putMetrics(TimelineMetrics metrics)
+    throws SQLException, IOException;
+}
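
A caller only needs the two query methods and putMetrics(). A brief illustrative sketch of a read through this interface (the wrapper class, metric name, and applicationId value are assumptions, not defined by this patch):

import java.io.IOException;
import java.sql.SQLException;
import java.util.Collections;

import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricStore;

public class MetricStoreClientSketch {

  // Fetch the last hour of "cpu_user" datapoints for a single host.
  static TimelineMetrics lastHourCpu(TimelineMetricStore store, String hostname)
      throws SQLException, IOException {
    long endTime = System.currentTimeMillis();
    long startTime = endTime - 3600 * 1000L;
    return store.getTimelineMetrics(
      Collections.singletonList("cpu_user"), // metricNames
      hostname,                              // hostname
      "datanode",                            // applicationId (illustrative value)
      null,                                  // instanceId
      startTime, endTime,
      null,                                  // limit: fall back to the service default
      false);                                // groupedByHosts
  }
}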

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptFinishData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptFinishData.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptFinishData.java
new file mode 100644
index 0000000..7ba51af
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptFinishData.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * The class contains the fields that can be determined when
+ * <code>RMAppAttempt</code> finishes, and that need to be stored persistently.
+ */
+@Public
+@Unstable
+public abstract class ApplicationAttemptFinishData {
+
+  @Public
+  @Unstable
+  public static ApplicationAttemptFinishData newInstance(
+      ApplicationAttemptId appAttemptId, String diagnosticsInfo,
+      String trackingURL, FinalApplicationStatus finalApplicationStatus,
+      YarnApplicationAttemptState yarnApplicationAttemptState) {
+    ApplicationAttemptFinishData appAttemptFD =
+        Records.newRecord(ApplicationAttemptFinishData.class);
+    appAttemptFD.setApplicationAttemptId(appAttemptId);
+    appAttemptFD.setDiagnosticsInfo(diagnosticsInfo);
+    appAttemptFD.setTrackingURL(trackingURL);
+    appAttemptFD.setFinalApplicationStatus(finalApplicationStatus);
+    appAttemptFD.setYarnApplicationAttemptState(yarnApplicationAttemptState);
+    return appAttemptFD;
+  }
+
+  @Public
+  @Unstable
+  public abstract ApplicationAttemptId getApplicationAttemptId();
+
+  @Public
+  @Unstable
+  public abstract void setApplicationAttemptId(
+      ApplicationAttemptId applicationAttemptId);
+
+  @Public
+  @Unstable
+  public abstract String getTrackingURL();
+
+  @Public
+  @Unstable
+  public abstract void setTrackingURL(String trackingURL);
+
+  @Public
+  @Unstable
+  public abstract String getDiagnosticsInfo();
+
+  @Public
+  @Unstable
+  public abstract void setDiagnosticsInfo(String diagnosticsInfo);
+
+  @Public
+  @Unstable
+  public abstract FinalApplicationStatus getFinalApplicationStatus();
+
+  @Public
+  @Unstable
+  public abstract void setFinalApplicationStatus(
+      FinalApplicationStatus finalApplicationStatus);
+
+  @Public
+  @Unstable
+  public abstract YarnApplicationAttemptState getYarnApplicationAttemptState();
+
+  @Public
+  @Unstable
+  public abstract void setYarnApplicationAttemptState(
+      YarnApplicationAttemptState yarnApplicationAttemptState);
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptHistoryData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptHistoryData.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptHistoryData.java
new file mode 100644
index 0000000..b759ab1
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptHistoryData.java
@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
+
+/**
+ * The class contains all the fields that are stored persistently for
+ * <code>RMAppAttempt</code>.
+ */
+@Public
+@Unstable
+public class ApplicationAttemptHistoryData {
+
+  private ApplicationAttemptId applicationAttemptId;
+
+  private String host;
+
+  private int rpcPort;
+
+  private String trackingURL;
+
+  private String diagnosticsInfo;
+
+  private FinalApplicationStatus finalApplicationStatus;
+
+  private ContainerId masterContainerId;
+
+  private YarnApplicationAttemptState yarnApplicationAttemptState;
+
+  @Public
+  @Unstable
+  public static ApplicationAttemptHistoryData newInstance(
+      ApplicationAttemptId appAttemptId, String host, int rpcPort,
+      ContainerId masterContainerId, String diagnosticsInfo,
+      String trackingURL, FinalApplicationStatus finalApplicationStatus,
+      YarnApplicationAttemptState yarnApplicationAttemptState) {
+    ApplicationAttemptHistoryData appAttemptHD =
+        new ApplicationAttemptHistoryData();
+    appAttemptHD.setApplicationAttemptId(appAttemptId);
+    appAttemptHD.setHost(host);
+    appAttemptHD.setRPCPort(rpcPort);
+    appAttemptHD.setMasterContainerId(masterContainerId);
+    appAttemptHD.setDiagnosticsInfo(diagnosticsInfo);
+    appAttemptHD.setTrackingURL(trackingURL);
+    appAttemptHD.setFinalApplicationStatus(finalApplicationStatus);
+    appAttemptHD.setYarnApplicationAttemptState(yarnApplicationAttemptState);
+    return appAttemptHD;
+  }
+
+  @Public
+  @Unstable
+  public ApplicationAttemptId getApplicationAttemptId() {
+    return applicationAttemptId;
+  }
+
+  @Public
+  @Unstable
+  public void
+      setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) {
+    this.applicationAttemptId = applicationAttemptId;
+  }
+
+  @Public
+  @Unstable
+  public String getHost() {
+    return host;
+  }
+
+  @Public
+  @Unstable
+  public void setHost(String host) {
+    this.host = host;
+  }
+
+  @Public
+  @Unstable
+  public int getRPCPort() {
+    return rpcPort;
+  }
+
+  @Public
+  @Unstable
+  public void setRPCPort(int rpcPort) {
+    this.rpcPort = rpcPort;
+  }
+
+  @Public
+  @Unstable
+  public String getTrackingURL() {
+    return trackingURL;
+  }
+
+  @Public
+  @Unstable
+  public void setTrackingURL(String trackingURL) {
+    this.trackingURL = trackingURL;
+  }
+
+  @Public
+  @Unstable
+  public String getDiagnosticsInfo() {
+    return diagnosticsInfo;
+  }
+
+  @Public
+  @Unstable
+  public void setDiagnosticsInfo(String diagnosticsInfo) {
+    this.diagnosticsInfo = diagnosticsInfo;
+  }
+
+  @Public
+  @Unstable
+  public FinalApplicationStatus getFinalApplicationStatus() {
+    return finalApplicationStatus;
+  }
+
+  @Public
+  @Unstable
+  public void setFinalApplicationStatus(
+      FinalApplicationStatus finalApplicationStatus) {
+    this.finalApplicationStatus = finalApplicationStatus;
+  }
+
+  @Public
+  @Unstable
+  public ContainerId getMasterContainerId() {
+    return masterContainerId;
+  }
+
+  @Public
+  @Unstable
+  public void setMasterContainerId(ContainerId masterContainerId) {
+    this.masterContainerId = masterContainerId;
+  }
+
+  @Public
+  @Unstable
+  public YarnApplicationAttemptState getYarnApplicationAttemptState() {
+    return yarnApplicationAttemptState;
+  }
+
+  @Public
+  @Unstable
+  public void setYarnApplicationAttemptState(
+      YarnApplicationAttemptState yarnApplicationAttemptState) {
+    this.yarnApplicationAttemptState = yarnApplicationAttemptState;
+  }
+
+}
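
Since this record is a plain holder rather than a PB-backed abstract class, it can be populated directly through newInstance(). A short illustrative use (all values below are made up):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;

public class AttemptHistoryDemo {
  public static void main(String[] args) {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId amContainer = ContainerId.newInstance(attemptId, 1);

    ApplicationAttemptHistoryData history = ApplicationAttemptHistoryData.newInstance(
      attemptId, "node-1.example.com", 45454, amContainer,
      "",                                          // diagnosticsInfo
      "http://rm.example.com:8088/proxy/" + appId, // trackingURL
      FinalApplicationStatus.SUCCEEDED,
      YarnApplicationAttemptState.FINISHED);

    System.out.println(history.getApplicationAttemptId()
      + " -> " + history.getFinalApplicationStatus());
  }
}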

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptStartData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptStartData.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptStartData.java
new file mode 100644
index 0000000..7ca43fa
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptStartData.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * The class contains the fields that can be determined when
+ * <code>RMAppAttempt</code> starts, and that need to be stored persistently.
+ */
+@Public
+@Unstable
+public abstract class ApplicationAttemptStartData {
+
+  @Public
+  @Unstable
+  public static ApplicationAttemptStartData newInstance(
+      ApplicationAttemptId appAttemptId, String host, int rpcPort,
+      ContainerId masterContainerId) {
+    ApplicationAttemptStartData appAttemptSD =
+        Records.newRecord(ApplicationAttemptStartData.class);
+    appAttemptSD.setApplicationAttemptId(appAttemptId);
+    appAttemptSD.setHost(host);
+    appAttemptSD.setRPCPort(rpcPort);
+    appAttemptSD.setMasterContainerId(masterContainerId);
+    return appAttemptSD;
+  }
+
+  @Public
+  @Unstable
+  public abstract ApplicationAttemptId getApplicationAttemptId();
+
+  @Public
+  @Unstable
+  public abstract void setApplicationAttemptId(
+      ApplicationAttemptId applicationAttemptId);
+
+  @Public
+  @Unstable
+  public abstract String getHost();
+
+  @Public
+  @Unstable
+  public abstract void setHost(String host);
+
+  @Public
+  @Unstable
+  public abstract int getRPCPort();
+
+  @Public
+  @Unstable
+  public abstract void setRPCPort(int rpcPort);
+
+  @Public
+  @Unstable
+  public abstract ContainerId getMasterContainerId();
+
+  @Public
+  @Unstable
+  public abstract void setMasterContainerId(ContainerId masterContainerId);
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationFinishData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationFinishData.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationFinishData.java
new file mode 100644
index 0000000..997fa6c
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationFinishData.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * The class contains the fields that can be determined when <code>RMApp</code>
+ * finishes, and that need to be stored persistently.
+ */
+@Public
+@Unstable
+public abstract class ApplicationFinishData {
+
+  @Public
+  @Unstable
+  public static ApplicationFinishData newInstance(ApplicationId applicationId,
+      long finishTime, String diagnosticsInfo,
+      FinalApplicationStatus finalApplicationStatus,
+      YarnApplicationState yarnApplicationState) {
+    ApplicationFinishData appFD =
+        Records.newRecord(ApplicationFinishData.class);
+    appFD.setApplicationId(applicationId);
+    appFD.setFinishTime(finishTime);
+    appFD.setDiagnosticsInfo(diagnosticsInfo);
+    appFD.setFinalApplicationStatus(finalApplicationStatus);
+    appFD.setYarnApplicationState(yarnApplicationState);
+    return appFD;
+  }
+
+  @Public
+  @Unstable
+  public abstract ApplicationId getApplicationId();
+
+  @Public
+  @Unstable
+  public abstract void setApplicationId(ApplicationId applicationId);
+
+  @Public
+  @Unstable
+  public abstract long getFinishTime();
+
+  @Public
+  @Unstable
+  public abstract void setFinishTime(long finishTime);
+
+  @Public
+  @Unstable
+  public abstract String getDiagnosticsInfo();
+
+  @Public
+  @Unstable
+  public abstract void setDiagnosticsInfo(String diagnosticsInfo);
+
+  @Public
+  @Unstable
+  public abstract FinalApplicationStatus getFinalApplicationStatus();
+
+  @Public
+  @Unstable
+  public abstract void setFinalApplicationStatus(
+      FinalApplicationStatus finalApplicationStatus);
+
+  @Public
+  @Unstable
+  public abstract YarnApplicationState getYarnApplicationState();
+
+  @Public
+  @Unstable
+  public abstract void setYarnApplicationState(
+      YarnApplicationState yarnApplicationState);
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationHistoryData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationHistoryData.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationHistoryData.java
new file mode 100644
index 0000000..b7d16f3
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationHistoryData.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+
+/**
+ * The class contains all the fields that are stored persistently for
+ * <code>RMApp</code>.
+ */
+@Public
+@Unstable
+public class ApplicationHistoryData {
+
+  private ApplicationId applicationId;
+
+  private String applicationName;
+
+  private String applicationType;
+
+  private String user;
+
+  private String queue;
+
+  private long submitTime;
+
+  private long startTime;
+
+  private long finishTime;
+
+  private String diagnosticsInfo;
+
+  private FinalApplicationStatus finalApplicationStatus;
+
+  private YarnApplicationState yarnApplicationState;
+
+  @Public
+  @Unstable
+  public static ApplicationHistoryData newInstance(ApplicationId applicationId,
+      String applicationName, String applicationType, String queue,
+      String user, long submitTime, long startTime, long finishTime,
+      String diagnosticsInfo, FinalApplicationStatus finalApplicationStatus,
+      YarnApplicationState yarnApplicationState) {
+    ApplicationHistoryData appHD = new ApplicationHistoryData();
+    appHD.setApplicationId(applicationId);
+    appHD.setApplicationName(applicationName);
+    appHD.setApplicationType(applicationType);
+    appHD.setQueue(queue);
+    appHD.setUser(user);
+    appHD.setSubmitTime(submitTime);
+    appHD.setStartTime(startTime);
+    appHD.setFinishTime(finishTime);
+    appHD.setDiagnosticsInfo(diagnosticsInfo);
+    appHD.setFinalApplicationStatus(finalApplicationStatus);
+    appHD.setYarnApplicationState(yarnApplicationState);
+    return appHD;
+  }
+
+  @Public
+  @Unstable
+  public ApplicationId getApplicationId() {
+    return applicationId;
+  }
+
+  @Public
+  @Unstable
+  public void setApplicationId(ApplicationId applicationId) {
+    this.applicationId = applicationId;
+  }
+
+  @Public
+  @Unstable
+  public String getApplicationName() {
+    return applicationName;
+  }
+
+  @Public
+  @Unstable
+  public void setApplicationName(String applicationName) {
+    this.applicationName = applicationName;
+  }
+
+  @Public
+  @Unstable
+  public String getApplicationType() {
+    return applicationType;
+  }
+
+  @Public
+  @Unstable
+  public void setApplicationType(String applicationType) {
+    this.applicationType = applicationType;
+  }
+
+  @Public
+  @Unstable
+  public String getUser() {
+    return user;
+  }
+
+  @Public
+  @Unstable
+  public void setUser(String user) {
+    this.user = user;
+  }
+
+  @Public
+  @Unstable
+  public String getQueue() {
+    return queue;
+  }
+
+  @Public
+  @Unstable
+  public void setQueue(String queue) {
+    this.queue = queue;
+  }
+
+  @Public
+  @Unstable
+  public long getSubmitTime() {
+    return submitTime;
+  }
+
+  @Public
+  @Unstable
+  public void setSubmitTime(long submitTime) {
+    this.submitTime = submitTime;
+  }
+
+  @Public
+  @Unstable
+  public long getStartTime() {
+    return startTime;
+  }
+
+  @Public
+  @Unstable
+  public void setStartTime(long startTime) {
+    this.startTime = startTime;
+  }
+
+  @Public
+  @Unstable
+  public long getFinishTime() {
+    return finishTime;
+  }
+
+  @Public
+  @Unstable
+  public void setFinishTime(long finishTime) {
+    this.finishTime = finishTime;
+  }
+
+  @Public
+  @Unstable
+  public String getDiagnosticsInfo() {
+    return diagnosticsInfo;
+  }
+
+  @Public
+  @Unstable
+  public void setDiagnosticsInfo(String diagnosticsInfo) {
+    this.diagnosticsInfo = diagnosticsInfo;
+  }
+
+  @Public
+  @Unstable
+  public FinalApplicationStatus getFinalApplicationStatus() {
+    return finalApplicationStatus;
+  }
+
+  @Public
+  @Unstable
+  public void setFinalApplicationStatus(
+      FinalApplicationStatus finalApplicationStatus) {
+    this.finalApplicationStatus = finalApplicationStatus;
+  }
+
+  @Public
+  @Unstable
+  public YarnApplicationState getYarnApplicationState() {
+    return this.yarnApplicationState;
+  }
+
+  @Public
+  @Unstable
+  public void
+      setYarnApplicationState(YarnApplicationState yarnApplicationState) {
+    this.yarnApplicationState = yarnApplicationState;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java
new file mode 100644
index 0000000..6bc1323
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * The class contains the fields that can be determined when <code>RMApp</code>
+ * starts, and that need to be stored persistently.
+ */
+@Public
+@Unstable
+public abstract class ApplicationStartData {
+
+  @Public
+  @Unstable
+  public static ApplicationStartData newInstance(ApplicationId applicationId,
+      String applicationName, String applicationType, String queue,
+      String user, long submitTime, long startTime) {
+    ApplicationStartData appSD = Records.newRecord(ApplicationStartData.class);
+    appSD.setApplicationId(applicationId);
+    appSD.setApplicationName(applicationName);
+    appSD.setApplicationType(applicationType);
+    appSD.setQueue(queue);
+    appSD.setUser(user);
+    appSD.setSubmitTime(submitTime);
+    appSD.setStartTime(startTime);
+    return appSD;
+  }
+
+  @Public
+  @Unstable
+  public abstract ApplicationId getApplicationId();
+
+  @Public
+  @Unstable
+  public abstract void setApplicationId(ApplicationId applicationId);
+
+  @Public
+  @Unstable
+  public abstract String getApplicationName();
+
+  @Public
+  @Unstable
+  public abstract void setApplicationName(String applicationName);
+
+  @Public
+  @Unstable
+  public abstract String getApplicationType();
+
+  @Public
+  @Unstable
+  public abstract void setApplicationType(String applicationType);
+
+  @Public
+  @Unstable
+  public abstract String getUser();
+
+  @Public
+  @Unstable
+  public abstract void setUser(String user);
+
+  @Public
+  @Unstable
+  public abstract String getQueue();
+
+  @Public
+  @Unstable
+  public abstract void setQueue(String queue);
+
+  @Public
+  @Unstable
+  public abstract long getSubmitTime();
+
+  @Public
+  @Unstable
+  public abstract void setSubmitTime(long submitTime);
+
+  @Public
+  @Unstable
+  public abstract long getStartTime();
+
+  @Public
+  @Unstable
+  public abstract void setStartTime(long startTime);
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerFinishData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerFinishData.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerFinishData.java
new file mode 100644
index 0000000..5eb9ddb
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerFinishData.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * The class contains the fields that can be determined when
+ * <code>RMContainer</code> finishes, and that need to be stored persistently.
+ */
+@Public
+@Unstable
+public abstract class ContainerFinishData {
+
+  @Public
+  @Unstable
+  public static ContainerFinishData newInstance(ContainerId containerId,
+      long finishTime, String diagnosticsInfo, int containerExitCode,
+      ContainerState containerState) {
+    ContainerFinishData containerFD =
+        Records.newRecord(ContainerFinishData.class);
+    containerFD.setContainerId(containerId);
+    containerFD.setFinishTime(finishTime);
+    containerFD.setDiagnosticsInfo(diagnosticsInfo);
+    containerFD.setContainerExitStatus(containerExitCode);
+    containerFD.setContainerState(containerState);
+    return containerFD;
+  }
+
+  @Public
+  @Unstable
+  public abstract ContainerId getContainerId();
+
+  @Public
+  @Unstable
+  public abstract void setContainerId(ContainerId containerId);
+
+  @Public
+  @Unstable
+  public abstract long getFinishTime();
+
+  @Public
+  @Unstable
+  public abstract void setFinishTime(long finishTime);
+
+  @Public
+  @Unstable
+  public abstract String getDiagnosticsInfo();
+
+  @Public
+  @Unstable
+  public abstract void setDiagnosticsInfo(String diagnosticsInfo);
+
+  @Public
+  @Unstable
+  public abstract int getContainerExitStatus();
+
+  @Public
+  @Unstable
+  public abstract void setContainerExitStatus(int containerExitStatus);
+
+  @Public
+  @Unstable
+  public abstract ContainerState getContainerState();
+
+  @Public
+  @Unstable
+  public abstract void setContainerState(ContainerState containerState);
+
+}
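
A similar sketch for the finish-time record, again illustrative rather than part of the patch. The container id is built with the Hadoop 2.x ContainerId.newInstance factory, the diagnostics string and example class name are made up, and Records.newRecord again relies on a ContainerFinishDataPBImpl being available; ContainerExitStatus.SUCCESS and ContainerState.COMPLETE are the standard YARN constants.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;

public class ContainerFinishDataExample {
  public static void main(String[] args) {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId = ContainerId.newInstance(attemptId, 1);

    // Everything that is only known once the container has finished.
    ContainerFinishData finishData = ContainerFinishData.newInstance(
        containerId, System.currentTimeMillis(), "Container finished normally",
        ContainerExitStatus.SUCCESS, ContainerState.COMPLETE);

    System.out.println(finishData.getContainerId() + " exited with "
        + finishData.getContainerExitStatus());
  }
}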

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerHistoryData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerHistoryData.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerHistoryData.java
new file mode 100644
index 0000000..e606185
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerHistoryData.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+/**
+ * The class contains all the fields that are stored persistently for
+ * <code>RMContainer</code>.
+ */
+@Public
+@Unstable
+public class ContainerHistoryData {
+
+  private ContainerId containerId;
+
+  private Resource allocatedResource;
+
+  private NodeId assignedNode;
+
+  private Priority priority;
+
+  private long startTime;
+
+  private long finishTime;
+
+  private String diagnosticsInfo;
+
+  private int containerExitStatus;
+
+  private ContainerState containerState;
+
+  @Public
+  @Unstable
+  public static ContainerHistoryData newInstance(ContainerId containerId,
+      Resource allocatedResource, NodeId assignedNode, Priority priority,
+      long startTime, long finishTime, String diagnosticsInfo,
+      int containerExitCode, ContainerState containerState) {
+    ContainerHistoryData containerHD = new ContainerHistoryData();
+    containerHD.setContainerId(containerId);
+    containerHD.setAllocatedResource(allocatedResource);
+    containerHD.setAssignedNode(assignedNode);
+    containerHD.setPriority(priority);
+    containerHD.setStartTime(startTime);
+    containerHD.setFinishTime(finishTime);
+    containerHD.setDiagnosticsInfo(diagnosticsInfo);
+    containerHD.setContainerExitStatus(containerExitCode);
+    containerHD.setContainerState(containerState);
+    return containerHD;
+  }
+
+  @Public
+  @Unstable
+  public ContainerId getContainerId() {
+    return containerId;
+  }
+
+  @Public
+  @Unstable
+  public void setContainerId(ContainerId containerId) {
+    this.containerId = containerId;
+  }
+
+  @Public
+  @Unstable
+  public Resource getAllocatedResource() {
+    return allocatedResource;
+  }
+
+  @Public
+  @Unstable
+  public void setAllocatedResource(Resource resource) {
+    this.allocatedResource = resource;
+  }
+
+  @Public
+  @Unstable
+  public NodeId getAssignedNode() {
+    return assignedNode;
+  }
+
+  @Public
+  @Unstable
+  public void setAssignedNode(NodeId nodeId) {
+    this.assignedNode = nodeId;
+  }
+
+  @Public
+  @Unstable
+  public Priority getPriority() {
+    return priority;
+  }
+
+  @Public
+  @Unstable
+  public void setPriority(Priority priority) {
+    this.priority = priority;
+  }
+
+  @Public
+  @Unstable
+  public long getStartTime() {
+    return startTime;
+  }
+
+  @Public
+  @Unstable
+  public void setStartTime(long startTime) {
+    this.startTime = startTime;
+  }
+
+  @Public
+  @Unstable
+  public long getFinishTime() {
+    return finishTime;
+  }
+
+  @Public
+  @Unstable
+  public void setFinishTime(long finishTime) {
+    this.finishTime = finishTime;
+  }
+
+  @Public
+  @Unstable
+  public String getDiagnosticsInfo() {
+    return diagnosticsInfo;
+  }
+
+  @Public
+  @Unstable
+  public void setDiagnosticsInfo(String diagnosticsInfo) {
+    this.diagnosticsInfo = diagnosticsInfo;
+  }
+
+  @Public
+  @Unstable
+  public int getContainerExitStatus() {
+    return containerExitStatus;
+  }
+
+  @Public
+  @Unstable
+  public void setContainerExitStatus(int containerExitStatus) {
+    this.containerExitStatus = containerExitStatus;
+  }
+
+  @Public
+  @Unstable
+  public ContainerState getContainerState() {
+    return containerState;
+  }
+
+  @Public
+  @Unstable
+  public void setContainerState(ContainerState containerState) {
+    this.containerState = containerState;
+  }
+
+}
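
Because ContainerHistoryData is a plain POJO rather than a Records-backed record, it can be built directly. The sketch below (illustrative only, with made-up resource, node and diagnostics values) shows the start- and finish-time fields coming together in one history record.

import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;

public class ContainerHistoryDataExample {
  // Builds the full history record once both start and finish data are known.
  static ContainerHistoryData toHistory(ContainerId containerId,
      long startTime, long finishTime) {
    return ContainerHistoryData.newInstance(
        containerId,
        Resource.newInstance(1024, 1),            // 1 GB, 1 vcore
        NodeId.newInstance("nm-host-1", 45454),   // illustrative node
        Priority.newInstance(0),
        startTime,
        finishTime,
        "Container finished normally",
        ContainerExitStatus.SUCCESS,
        ContainerState.COMPLETE);
  }
}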

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerStartData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerStartData.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerStartData.java
new file mode 100644
index 0000000..0c6dd81
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerStartData.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * The class contains the fields that can be determined when
+ * <code>RMContainer</code> starts, and that need to be stored persistently.
+ */
+@Public
+@Unstable
+public abstract class ContainerStartData {
+
+  @Public
+  @Unstable
+  public static ContainerStartData newInstance(ContainerId containerId,
+      Resource allocatedResource, NodeId assignedNode, Priority priority,
+      long startTime) {
+    ContainerStartData containerSD =
+        Records.newRecord(ContainerStartData.class);
+    containerSD.setContainerId(containerId);
+    containerSD.setAllocatedResource(allocatedResource);
+    containerSD.setAssignedNode(assignedNode);
+    containerSD.setPriority(priority);
+    containerSD.setStartTime(startTime);
+    return containerSD;
+  }
+
+  @Public
+  @Unstable
+  public abstract ContainerId getContainerId();
+
+  @Public
+  @Unstable
+  public abstract void setContainerId(ContainerId containerId);
+
+  @Public
+  @Unstable
+  public abstract Resource getAllocatedResource();
+
+  @Public
+  @Unstable
+  public abstract void setAllocatedResource(Resource resource);
+
+  @Public
+  @Unstable
+  public abstract NodeId getAssignedNode();
+
+  @Public
+  @Unstable
+  public abstract void setAssignedNode(NodeId nodeId);
+
+  @Public
+  @Unstable
+  public abstract Priority getPriority();
+
+  @Public
+  @Unstable
+  public abstract void setPriority(Priority priority);
+
+  @Public
+  @Unstable
+  public abstract long getStartTime();
+
+  @Public
+  @Unstable
+  public abstract void setStartTime(long startTime);
+
+}
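
And the corresponding start-time sketch, with the same caveats: the helper and class names, node, resource and priority values are illustrative, and Records.newRecord relies on a ContainerStartDataPBImpl being present.

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;

public class ContainerStartDataExample {
  // Captures what is known when the RMContainer starts; the finish-time
  // fields live in ContainerFinishData above.
  static ContainerStartData onContainerStart(ContainerId containerId) {
    return ContainerStartData.newInstance(
        containerId,
        Resource.newInstance(2048, 2),            // requested allocation
        NodeId.newInstance("nm-host-2", 45454),   // illustrative node
        Priority.newInstance(1),
        System.currentTimeMillis());
  }
}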

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptFinishDataPBImpl.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptFinishDataPBImpl.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptFinishDataPBImpl.java
new file mode 100644
index 0000000..945c12f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptFinishDataPBImpl.java
@@ -0,0 +1,239 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProto;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationAttemptStateProto;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
+
+import com.google.protobuf.TextFormat;
+
+public class ApplicationAttemptFinishDataPBImpl extends
+    ApplicationAttemptFinishData {
+
+  ApplicationAttemptFinishDataProto proto = ApplicationAttemptFinishDataProto
+    .getDefaultInstance();
+  ApplicationAttemptFinishDataProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public ApplicationAttemptFinishDataPBImpl() {
+    builder = ApplicationAttemptFinishDataProto.newBuilder();
+  }
+
+  public ApplicationAttemptFinishDataPBImpl(
+      ApplicationAttemptFinishDataProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  private ApplicationAttemptId applicationAttemptId;
+
+  @Override
+  public ApplicationAttemptId getApplicationAttemptId() {
+    if (this.applicationAttemptId != null) {
+      return this.applicationAttemptId;
+    }
+    ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasApplicationAttemptId()) {
+      return null;
+    }
+    this.applicationAttemptId =
+        convertFromProtoFormat(p.getApplicationAttemptId());
+    return this.applicationAttemptId;
+  }
+
+  @Override
+  public void
+      setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) {
+    maybeInitBuilder();
+    if (applicationAttemptId == null) {
+      builder.clearApplicationAttemptId();
+    }
+    this.applicationAttemptId = applicationAttemptId;
+  }
+
+  @Override
+  public String getTrackingURL() {
+    ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasTrackingUrl()) {
+      return null;
+    }
+    return p.getTrackingUrl();
+  }
+
+  @Override
+  public void setTrackingURL(String trackingURL) {
+    maybeInitBuilder();
+    if (trackingURL == null) {
+      builder.clearTrackingUrl();
+      return;
+    }
+    builder.setTrackingUrl(trackingURL);
+  }
+
+  @Override
+  public String getDiagnosticsInfo() {
+    ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasDiagnosticsInfo()) {
+      return null;
+    }
+    return p.getDiagnosticsInfo();
+  }
+
+  @Override
+  public void setDiagnosticsInfo(String diagnosticsInfo) {
+    maybeInitBuilder();
+    if (diagnosticsInfo == null) {
+      builder.clearDiagnosticsInfo();
+      return;
+    }
+    builder.setDiagnosticsInfo(diagnosticsInfo);
+  }
+
+  @Override
+  public FinalApplicationStatus getFinalApplicationStatus() {
+    ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasFinalApplicationStatus()) {
+      return null;
+    }
+    return convertFromProtoFormat(p.getFinalApplicationStatus());
+  }
+
+  @Override
+  public void setFinalApplicationStatus(
+      FinalApplicationStatus finalApplicationStatus) {
+    maybeInitBuilder();
+    if (finalApplicationStatus == null) {
+      builder.clearFinalApplicationStatus();
+      return;
+    }
+    builder
+      .setFinalApplicationStatus(convertToProtoFormat(finalApplicationStatus));
+  }
+
+  @Override
+  public YarnApplicationAttemptState getYarnApplicationAttemptState() {
+    ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasYarnApplicationAttemptState()) {
+      return null;
+    }
+    return convertFromProtoFormat(p.getYarnApplicationAttemptState());
+  }
+
+  @Override
+  public void setYarnApplicationAttemptState(YarnApplicationAttemptState state) {
+    maybeInitBuilder();
+    if (state == null) {
+      builder.clearYarnApplicationAttemptState();
+      return;
+    }
+    builder.setYarnApplicationAttemptState(convertToProtoFormat(state));
+  }
+
+  public ApplicationAttemptFinishDataProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.applicationAttemptId != null
+        && !((ApplicationAttemptIdPBImpl) this.applicationAttemptId).getProto()
+          .equals(builder.getApplicationAttemptId())) {
+      builder
+        .setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = ApplicationAttemptFinishDataProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private ApplicationAttemptIdPBImpl convertFromProtoFormat(
+      ApplicationAttemptIdProto applicationAttemptId) {
+    return new ApplicationAttemptIdPBImpl(applicationAttemptId);
+  }
+
+  private ApplicationAttemptIdProto convertToProtoFormat(
+      ApplicationAttemptId applicationAttemptId) {
+    return ((ApplicationAttemptIdPBImpl) applicationAttemptId).getProto();
+  }
+
+  private FinalApplicationStatus convertFromProtoFormat(
+      FinalApplicationStatusProto finalApplicationStatus) {
+    return ProtoUtils.convertFromProtoFormat(finalApplicationStatus);
+  }
+
+  private FinalApplicationStatusProto convertToProtoFormat(
+      FinalApplicationStatus finalApplicationStatus) {
+    return ProtoUtils.convertToProtoFormat(finalApplicationStatus);
+  }
+
+  private YarnApplicationAttemptStateProto convertToProtoFormat(
+      YarnApplicationAttemptState state) {
+    return ProtoUtils.convertToProtoFormat(state);
+  }
+
+  private YarnApplicationAttemptState convertFromProtoFormat(
+      YarnApplicationAttemptStateProto yarnApplicationAttemptState) {
+    return ProtoUtils.convertFromProtoFormat(yarnApplicationAttemptState);
+  }
+
+}
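
A round-trip sketch of the PB-backed implementation above, not part of the patch; the attempt id, tracking URL and class name are illustrative. getProto() folds the lazily cached attempt id into the builder, and equals() delegates to the underlying proto, so a copy rebuilt from the serialized form compares equal.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProto;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationAttemptFinishDataPBImpl;

public class AttemptFinishDataRoundTrip {
  public static void main(String[] args) {
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
        ApplicationId.newInstance(System.currentTimeMillis(), 1), 1);

    ApplicationAttemptFinishDataPBImpl finishData =
        new ApplicationAttemptFinishDataPBImpl();
    finishData.setApplicationAttemptId(attemptId);
    finishData.setTrackingURL("http://rm-host:8088/proxy/application_1_0001");
    finishData.setDiagnosticsInfo("");
    finishData.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED);
    finishData.setYarnApplicationAttemptState(YarnApplicationAttemptState.FINISHED);

    // getProto() merges the cached attempt id into the builder and freezes it.
    ApplicationAttemptFinishDataProto proto = finishData.getProto();
    ApplicationAttemptFinishDataPBImpl copy =
        new ApplicationAttemptFinishDataPBImpl(proto);

    // equals()/hashCode() compare the protos, so the copy is equal.
    System.out.println(finishData.equals(copy));
  }
}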

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java
new file mode 100644
index 0000000..1f67fc7
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java
@@ -0,0 +1,208 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptStartDataProto;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptStartDataProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
+
+import com.google.protobuf.TextFormat;
+
+public class ApplicationAttemptStartDataPBImpl extends
+    ApplicationAttemptStartData {
+
+  ApplicationAttemptStartDataProto proto = ApplicationAttemptStartDataProto
+    .getDefaultInstance();
+  ApplicationAttemptStartDataProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public ApplicationAttemptStartDataPBImpl() {
+    builder = ApplicationAttemptStartDataProto.newBuilder();
+  }
+
+  public ApplicationAttemptStartDataPBImpl(
+      ApplicationAttemptStartDataProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  private ApplicationAttemptId applicationAttemptId;
+  private ContainerId masterContainerId;
+
+  @Override
+  public ApplicationAttemptId getApplicationAttemptId() {
+    if (this.applicationAttemptId != null) {
+      return this.applicationAttemptId;
+    }
+    ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasApplicationAttemptId()) {
+      return null;
+    }
+    this.applicationAttemptId =
+        convertFromProtoFormat(p.getApplicationAttemptId());
+    return this.applicationAttemptId;
+  }
+
+  @Override
+  public void
+      setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) {
+    maybeInitBuilder();
+    if (applicationAttemptId == null) {
+      builder.clearApplicationAttemptId();
+    }
+    this.applicationAttemptId = applicationAttemptId;
+  }
+
+  @Override
+  public String getHost() {
+    ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasHost()) {
+      return null;
+    }
+    return p.getHost();
+  }
+
+  @Override
+  public void setHost(String host) {
+    maybeInitBuilder();
+    if (host == null) {
+      builder.clearHost();
+      return;
+    }
+    builder.setHost(host);
+  }
+
+  @Override
+  public int getRPCPort() {
+    ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getRpcPort();
+  }
+
+  @Override
+  public void setRPCPort(int rpcPort) {
+    maybeInitBuilder();
+    builder.setRpcPort(rpcPort);
+  }
+
+  @Override
+  public ContainerId getMasterContainerId() {
+    if (this.masterContainerId != null) {
+      return this.masterContainerId;
+    }
+    ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasMasterContainerId()) {
+      return null;
+    }
+    this.masterContainerId = convertFromProtoFormat(p.getMasterContainerId());
+    return this.masterContainerId;
+  }
+
+  @Override
+  public void setMasterContainerId(ContainerId masterContainerId) {
+    maybeInitBuilder();
+    if (masterContainerId == null) {
+      builder.clearMasterContainerId();
+    }
+    this.masterContainerId = masterContainerId;
+  }
+
+  public ApplicationAttemptStartDataProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.applicationAttemptId != null
+        && !((ApplicationAttemptIdPBImpl) this.applicationAttemptId).getProto()
+          .equals(builder.getApplicationAttemptId())) {
+      builder
+        .setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId));
+    }
+    if (this.masterContainerId != null
+        && !((ContainerIdPBImpl) this.masterContainerId).getProto().equals(
+          builder.getMasterContainerId())) {
+      builder
+        .setMasterContainerId(convertToProtoFormat(this.masterContainerId));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = ApplicationAttemptStartDataProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private ApplicationAttemptIdPBImpl convertFromProtoFormat(
+      ApplicationAttemptIdProto applicationAttemptId) {
+    return new ApplicationAttemptIdPBImpl(applicationAttemptId);
+  }
+
+  private ApplicationAttemptIdProto convertToProtoFormat(
+      ApplicationAttemptId applicationAttemptId) {
+    return ((ApplicationAttemptIdPBImpl) applicationAttemptId).getProto();
+  }
+
+  private ContainerIdPBImpl
+      convertFromProtoFormat(ContainerIdProto containerId) {
+    return new ContainerIdPBImpl(containerId);
+  }
+
+  private ContainerIdProto convertToProtoFormat(ContainerId masterContainerId) {
+    return ((ContainerIdPBImpl) masterContainerId).getProto();
+  }
+
+}
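
The start-data implementation follows the same lazy-merge pattern; a short illustrative round trip (hypothetical class name, host and port values, Hadoop 2.x ContainerId factory) is sketched below.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptStartDataProto;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationAttemptStartDataPBImpl;

public class AttemptStartDataRoundTrip {
  public static void main(String[] args) {
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
        ApplicationId.newInstance(System.currentTimeMillis(), 1), 1);
    ContainerId amContainerId = ContainerId.newInstance(attemptId, 1);

    ApplicationAttemptStartDataPBImpl startData =
        new ApplicationAttemptStartDataPBImpl();
    startData.setApplicationAttemptId(attemptId);
    startData.setHost("nm-host-1");   // host running the AM container
    startData.setRPCPort(45454);
    startData.setMasterContainerId(amContainerId);

    // The cached attempt and container ids are merged into the builder here.
    ApplicationAttemptStartDataProto proto = startData.getProto();
    ApplicationAttemptStartDataPBImpl copy =
        new ApplicationAttemptStartDataPBImpl(proto);

    System.out.println(copy.getMasterContainerId());
  }
}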