Posted to commits@ambari.apache.org by sw...@apache.org on 2014/12/01 21:03:40 UTC

[12/22] ambari git commit: AMBARI-5707. Renaming a module. (swagle)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/metrics_def/NODEMANAGER.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/metrics_def/NODEMANAGER.dat b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/metrics_def/NODEMANAGER.dat
deleted file mode 100644
index 239b3d4..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/metrics_def/NODEMANAGER.dat
+++ /dev/null
@@ -1,67 +0,0 @@
-jvm.JvmMetrics.GcCount
-jvm.JvmMetrics.GcCountCopy
-jvm.JvmMetrics.GcCountMarkSweepCompact
-jvm.JvmMetrics.GcTimeMillis
-jvm.JvmMetrics.GcTimeMillisCopy
-jvm.JvmMetrics.GcTimeMillisMarkSweepCompact
-jvm.JvmMetrics.LogError
-jvm.JvmMetrics.LogFatal
-jvm.JvmMetrics.LogInfo
-jvm.JvmMetrics.LogWarn
-jvm.JvmMetrics.MemHeapCommittedM
-jvm.JvmMetrics.MemHeapMaxM
-jvm.JvmMetrics.MemHeapUsedM
-jvm.JvmMetrics.MemMaxM
-jvm.JvmMetrics.MemNonHeapCommittedM
-jvm.JvmMetrics.MemNonHeapMaxM
-jvm.JvmMetrics.MemNonHeapUsedM
-jvm.JvmMetrics.ThreadsBlocked
-jvm.JvmMetrics.ThreadsNew
-jvm.JvmMetrics.ThreadsRunnable
-jvm.JvmMetrics.ThreadsTerminated
-jvm.JvmMetrics.ThreadsTimedWaiting
-jvm.JvmMetrics.ThreadsWaiting
-mapred.ShuffleMetrics.ShuffleConnections
-mapred.ShuffleMetrics.ShuffleOutputBytes
-mapred.ShuffleMetrics.ShuffleOutputsFailed
-mapred.ShuffleMetrics.ShuffleOutputsOK
-metricssystem.MetricsSystem.DroppedPubAll
-metricssystem.MetricsSystem.NumActiveSinks
-metricssystem.MetricsSystem.NumActiveSources
-metricssystem.MetricsSystem.NumAllSinks
-metricssystem.MetricsSystem.NumAllSources
-metricssystem.MetricsSystem.PublishAvgTime
-metricssystem.MetricsSystem.PublishNumOps
-metricssystem.MetricsSystem.Sink_timelineAvgTime
-metricssystem.MetricsSystem.Sink_timelineDropped
-metricssystem.MetricsSystem.Sink_timelineNumOps
-metricssystem.MetricsSystem.Sink_timelineQsize
-metricssystem.MetricsSystem.SnapshotAvgTime
-metricssystem.MetricsSystem.SnapshotNumOps
-rpc.rpc.CallQueueLength
-rpc.rpc.NumOpenConnections
-rpc.rpc.ReceivedBytes
-rpc.rpc.RpcAuthenticationFailures
-rpc.rpc.RpcAuthenticationSuccesses
-rpc.rpc.RpcAuthorizationFailures
-rpc.rpc.RpcAuthorizationSuccesses
-rpc.rpc.RpcProcessingTimeAvgTime
-rpc.rpc.RpcProcessingTimeNumOps
-rpc.rpc.RpcQueueTimeAvgTime
-rpc.rpc.RpcQueueTimeNumOps
-rpc.rpc.SentBytes
-ugi.UgiMetrics.GetGroupsAvgTime
-ugi.UgiMetrics.GetGroupsNumOps
-ugi.UgiMetrics.LoginFailureAvgTime
-ugi.UgiMetrics.LoginFailureNumOps
-ugi.UgiMetrics.LoginSuccessAvgTime
-ugi.UgiMetrics.LoginSuccessNumOps
-yarn.NodeManagerMetrics.AllocatedContainers
-yarn.NodeManagerMetrics.AllocatedGB
-yarn.NodeManagerMetrics.AvailableGB
-yarn.NodeManagerMetrics.ContainersCompleted
-yarn.NodeManagerMetrics.ContainersFailed
-yarn.NodeManagerMetrics.ContainersIniting
-yarn.NodeManagerMetrics.ContainersKilled
-yarn.NodeManagerMetrics.ContainersLaunched
-yarn.NodeManagerMetrics.ContainersRunning
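
Each metrics_def .dat file removed in this commit is a plain whitelist: one
fully qualified metric name per line (a few names, such as the GC collector
metrics in RESOURCEMANAGER.dat below, embed spaces). A minimal loader sketch
for this format; the function name is hypothetical, as the real consumer is
the timeline service's Java code:

def load_metric_whitelist(path):
    # One metric name per line; strip surrounding whitespace only, since
    # names like 'jvm.JvmMetrics.GcCountPS MarkSweep' contain inner spaces.
    with open(path) as f:
        return [line.strip() for line in f if line.strip()]

# e.g. load_metric_whitelist('metrics_def/NODEMANAGER.dat')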

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/metrics_def/RESOURCEMANAGER.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/metrics_def/RESOURCEMANAGER.dat b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/metrics_def/RESOURCEMANAGER.dat
deleted file mode 100644
index ec698db..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/metrics_def/RESOURCEMANAGER.dat
+++ /dev/null
@@ -1,88 +0,0 @@
-jvm.JvmMetrics.GcCount 
-jvm.JvmMetrics.GcCountPS MarkSweep 
-jvm.JvmMetrics.GcCountPS Scavenge 
-jvm.JvmMetrics.GcTimeMillis 
-jvm.JvmMetrics.GcTimeMillisPS MarkSweep 
-jvm.JvmMetrics.GcTimeMillisPS Scavenge 
-jvm.JvmMetrics.LogError 
-jvm.JvmMetrics.LogFatal 
-jvm.JvmMetrics.LogInfo 
-jvm.JvmMetrics.LogWarn 
-jvm.JvmMetrics.MemHeapCommittedM 
-jvm.JvmMetrics.MemHeapMaxM 
-jvm.JvmMetrics.MemHeapUsedM 
-jvm.JvmMetrics.MemMaxM 
-jvm.JvmMetrics.MemNonHeapCommittedM 
-jvm.JvmMetrics.MemNonHeapMaxM 
-jvm.JvmMetrics.MemNonHeapUsedM 
-jvm.JvmMetrics.ThreadsBlocked 
-jvm.JvmMetrics.ThreadsNew 
-jvm.JvmMetrics.ThreadsRunnable 
-jvm.JvmMetrics.ThreadsTerminated 
-jvm.JvmMetrics.ThreadsTimedWaiting 
-jvm.JvmMetrics.ThreadsWaiting 
-metricssystem.MetricsSystem.DroppedPubAll 
-metricssystem.MetricsSystem.NumActiveSinks 
-metricssystem.MetricsSystem.NumActiveSources 
-metricssystem.MetricsSystem.NumAllSinks 
-metricssystem.MetricsSystem.NumAllSources 
-metricssystem.MetricsSystem.PublishAvgTime 
-metricssystem.MetricsSystem.PublishNumOps 
-metricssystem.MetricsSystem.Sink_timelineAvgTime 
-metricssystem.MetricsSystem.Sink_timelineDropped 
-metricssystem.MetricsSystem.Sink_timelineNumOps 
-metricssystem.MetricsSystem.Sink_timelineQsize 
-metricssystem.MetricsSystem.SnapshotAvgTime 
-metricssystem.MetricsSystem.SnapshotNumOps 
-rpc.rpc.CallQueueLength 
-rpc.rpc.NumOpenConnections 
-rpc.rpc.ReceivedBytes 
-rpc.rpc.RpcAuthenticationFailures 
-rpc.rpc.RpcAuthenticationSuccesses 
-rpc.rpc.RpcAuthorizationFailures 
-rpc.rpc.RpcAuthorizationSuccesses 
-rpc.rpc.RpcProcessingTimeAvgTime 
-rpc.rpc.RpcProcessingTimeNumOps 
-rpc.rpc.RpcQueueTimeAvgTime 
-rpc.rpc.RpcQueueTimeNumOps 
-rpc.rpc.SentBytes 
-rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime 
-rpcdetailed.rpcdetailed.NodeHeartbeatNumOps 
-rpcdetailed.rpcdetailed.RegisterNodeManagerAvgTime 
-rpcdetailed.rpcdetailed.RegisterNodeManagerNumOps 
-ugi.UgiMetrics.GetGroupsAvgTime 
-ugi.UgiMetrics.GetGroupsNumOps 
-ugi.UgiMetrics.LoginFailureAvgTime 
-ugi.UgiMetrics.LoginFailureNumOps 
-ugi.UgiMetrics.LoginSuccessAvgTime 
-ugi.UgiMetrics.LoginSuccessNumOps 
-yarn.ClusterMetrics.NumActiveNMs 
-yarn.ClusterMetrics.NumDecommissionedNMs 
-yarn.ClusterMetrics.NumLostNMs 
-yarn.ClusterMetrics.NumRebootedNMs 
-yarn.ClusterMetrics.NumUnhealthyNMs 
-yarn.QueueMetrics.ActiveApplications 
-yarn.QueueMetrics.ActiveUsers 
-yarn.QueueMetrics.AggregateContainersAllocated 
-yarn.QueueMetrics.AggregateContainersReleased 
-yarn.QueueMetrics.AllocatedContainers 
-yarn.QueueMetrics.AllocatedMB 
-yarn.QueueMetrics.AllocatedVCores 
-yarn.QueueMetrics.AppsCompleted 
-yarn.QueueMetrics.AppsFailed 
-yarn.QueueMetrics.AppsKilled 
-yarn.QueueMetrics.AppsPending 
-yarn.QueueMetrics.AppsRunning 
-yarn.QueueMetrics.AppsSubmitted 
-yarn.QueueMetrics.AvailableMB 
-yarn.QueueMetrics.AvailableVCores 
-yarn.QueueMetrics.PendingContainers 
-yarn.QueueMetrics.PendingMB 
-yarn.QueueMetrics.PendingVCores 
-yarn.QueueMetrics.ReservedContainers 
-yarn.QueueMetrics.ReservedMB 
-yarn.QueueMetrics.ReservedVCores 
-yarn.QueueMetrics.running_0 
-yarn.QueueMetrics.running_1440 
-yarn.QueueMetrics.running_300 
-yarn.QueueMetrics.running_60

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/metrics_def/SLAVE_HBASE.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/metrics_def/SLAVE_HBASE.dat b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/metrics_def/SLAVE_HBASE.dat
deleted file mode 100644
index 38b870f..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/metrics_def/SLAVE_HBASE.dat
+++ /dev/null
@@ -1,178 +0,0 @@
-ipc.IPC.authenticationFailures 
-ipc.IPC.authenticationSuccesses 
-ipc.IPC.authorizationFailures 
-ipc.IPC.authorizationSuccesses 
-ipc.IPC.numCallsInGeneralQueue 
-ipc.IPC.numCallsInPriorityQueue 
-ipc.IPC.numCallsInReplicationQueue 
-ipc.IPC.numOpenConnections 
-ipc.IPC.ProcessCallTime_75th_percentile 
-ipc.IPC.ProcessCallTime_95th_percentile 
-ipc.IPC.ProcessCallTime_99th_percentile 
-ipc.IPC.ProcessCallTime_max 
-ipc.IPC.ProcessCallTime_mean 
-ipc.IPC.ProcessCallTime_median 
-ipc.IPC.ProcessCallTime_min 
-ipc.IPC.ProcessCallTime_num_ops 
-ipc.IPC.QueueCallTime_75th_percentile 
-ipc.IPC.QueueCallTime_95th_percentile 
-ipc.IPC.QueueCallTime_99th_percentile 
-ipc.IPC.QueueCallTime_max 
-ipc.IPC.QueueCallTime_mean 
-ipc.IPC.QueueCallTime_median 
-ipc.IPC.QueueCallTime_min 
-ipc.IPC.QueueCallTime_num_ops 
-ipc.IPC.queueSize 
-ipc.IPC.receivedBytes 
-ipc.IPC.sentBytes 
-jvm.JvmMetrics.GcCount 
-jvm.JvmMetrics.GcCountConcurrentMarkSweep 
-jvm.JvmMetrics.GcCountCopy 
-jvm.JvmMetrics.GcTimeMillis 
-jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep 
-jvm.JvmMetrics.GcTimeMillisCopy 
-jvm.JvmMetrics.LogError 
-jvm.JvmMetrics.LogFatal 
-jvm.JvmMetrics.LogInfo 
-jvm.JvmMetrics.LogWarn 
-jvm.JvmMetrics.MemHeapCommittedM 
-jvm.JvmMetrics.MemHeapMaxM 
-jvm.JvmMetrics.MemHeapUsedM 
-jvm.JvmMetrics.MemMaxM 
-jvm.JvmMetrics.MemNonHeapCommittedM 
-jvm.JvmMetrics.MemNonHeapMaxM 
-jvm.JvmMetrics.MemNonHeapUsedM 
-jvm.JvmMetrics.ThreadsBlocked 
-jvm.JvmMetrics.ThreadsNew 
-jvm.JvmMetrics.ThreadsRunnable 
-jvm.JvmMetrics.ThreadsTerminated 
-jvm.JvmMetrics.ThreadsTimedWaiting 
-jvm.JvmMetrics.ThreadsWaiting 
-metricssystem.MetricsSystem.DroppedPubAll 
-metricssystem.MetricsSystem.NumActiveSinks 
-metricssystem.MetricsSystem.NumActiveSources 
-metricssystem.MetricsSystem.NumAllSinks 
-metricssystem.MetricsSystem.NumAllSources 
-metricssystem.MetricsSystem.PublishAvgTime 
-metricssystem.MetricsSystem.PublishNumOps 
-metricssystem.MetricsSystem.Sink_timelineAvgTime 
-metricssystem.MetricsSystem.Sink_timelineDropped 
-metricssystem.MetricsSystem.Sink_timelineNumOps 
-metricssystem.MetricsSystem.Sink_timelineQsize 
-metricssystem.MetricsSystem.SnapshotAvgTime 
-metricssystem.MetricsSystem.SnapshotNumOps 
-regionserver.Server.Append_75th_percentile 
-regionserver.Server.Append_95th_percentile 
-regionserver.Server.Append_99th_percentile 
-regionserver.Server.Append_max 
-regionserver.Server.Append_mean 
-regionserver.Server.Append_median 
-regionserver.Server.Append_min 
-regionserver.Server.Append_num_ops 
-regionserver.Server.blockCacheCount 
-regionserver.Server.blockCacheEvictionCount 
-regionserver.Server.blockCacheExpressHitPercent 
-regionserver.Server.blockCacheFreeSize 
-regionserver.Server.blockCacheHitCount 
-regionserver.Server.blockCacheMissCount 
-regionserver.Server.blockCacheSize 
-regionserver.Server.blockCountHitPercent 
-regionserver.Server.checkMutateFailedCount 
-regionserver.Server.checkMutatePassedCount 
-regionserver.Server.compactionQueueLength 
-regionserver.Server.Delete_75th_percentile 
-regionserver.Server.Delete_95th_percentile 
-regionserver.Server.Delete_99th_percentile 
-regionserver.Server.Delete_max 
-regionserver.Server.Delete_mean 
-regionserver.Server.Delete_median 
-regionserver.Server.Delete_min 
-regionserver.Server.Delete_num_ops 
-regionserver.Server.flushQueueLength 
-regionserver.Server.Get_75th_percentile 
-regionserver.Server.Get_95th_percentile 
-regionserver.Server.Get_99th_percentile 
-regionserver.Server.Get_max 
-regionserver.Server.Get_mean 
-regionserver.Server.Get_median 
-regionserver.Server.Get_min 
-regionserver.Server.Get_num_ops 
-regionserver.Server.hlogFileCount 
-regionserver.Server.hlogFileSize 
-regionserver.Server.Increment_75th_percentile 
-regionserver.Server.Increment_95th_percentile 
-regionserver.Server.Increment_99th_percentile 
-regionserver.Server.Increment_max 
-regionserver.Server.Increment_mean 
-regionserver.Server.Increment_median 
-regionserver.Server.Increment_min 
-regionserver.Server.Increment_num_ops 
-regionserver.Server.memStoreSize 
-regionserver.Server.Mutate_75th_percentile 
-regionserver.Server.Mutate_95th_percentile 
-regionserver.Server.Mutate_99th_percentile 
-regionserver.Server.Mutate_max 
-regionserver.Server.Mutate_mean 
-regionserver.Server.Mutate_median 
-regionserver.Server.Mutate_min 
-regionserver.Server.Mutate_num_ops 
-regionserver.Server.mutationsWithoutWALCount 
-regionserver.Server.mutationsWithoutWALSize 
-regionserver.Server.percentFilesLocal 
-regionserver.Server.readRequestCount 
-regionserver.Server.regionCount 
-regionserver.Server.regionServerStartTime 
-regionserver.Server.Replay_75th_percentile 
-regionserver.Server.Replay_95th_percentile 
-regionserver.Server.Replay_99th_percentile 
-regionserver.Server.Replay_max 
-regionserver.Server.Replay_mean 
-regionserver.Server.Replay_median 
-regionserver.Server.Replay_min 
-regionserver.Server.Replay_num_ops 
-regionserver.Server.slowAppendCount 
-regionserver.Server.slowDeleteCount 
-regionserver.Server.slowGetCount 
-regionserver.Server.slowIncrementCount 
-regionserver.Server.slowPutCount 
-regionserver.Server.staticBloomSize 
-regionserver.Server.staticIndexSize 
-regionserver.Server.storeCount 
-regionserver.Server.storeFileCount 
-regionserver.Server.storeFileIndexSize 
-regionserver.Server.storeFileSize 
-regionserver.Server.totalRequestCount 
-regionserver.Server.updatesBlockedTime 
-regionserver.Server.writeRequestCount 
-regionserver.WAL.appendCount 
-regionserver.WAL.AppendSize_75th_percentile 
-regionserver.WAL.AppendSize_95th_percentile 
-regionserver.WAL.AppendSize_99th_percentile 
-regionserver.WAL.AppendSize_max 
-regionserver.WAL.AppendSize_mean 
-regionserver.WAL.AppendSize_median 
-regionserver.WAL.AppendSize_min 
-regionserver.WAL.AppendSize_num_ops 
-regionserver.WAL.AppendTime_75th_percentile 
-regionserver.WAL.AppendTime_95th_percentile 
-regionserver.WAL.AppendTime_99th_percentile 
-regionserver.WAL.AppendTime_max 
-regionserver.WAL.AppendTime_mean 
-regionserver.WAL.AppendTime_median 
-regionserver.WAL.AppendTime_min 
-regionserver.WAL.AppendTime_num_ops 
-regionserver.WAL.slowAppendCount 
-regionserver.WAL.SyncTime_75th_percentile 
-regionserver.WAL.SyncTime_95th_percentile 
-regionserver.WAL.SyncTime_99th_percentile 
-regionserver.WAL.SyncTime_max 
-regionserver.WAL.SyncTime_mean 
-regionserver.WAL.SyncTime_median 
-regionserver.WAL.SyncTime_min 
-regionserver.WAL.SyncTime_num_ops 
-ugi.UgiMetrics.GetGroupsAvgTime 
-ugi.UgiMetrics.GetGroupsNumOps 
-ugi.UgiMetrics.LoginFailureAvgTime 
-ugi.UgiMetrics.LoginFailureNumOps 
-ugi.UgiMetrics.LoginSuccessAvgTime 
-ugi.UgiMetrics.LoginSuccessNumOps
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/ams_query.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/ams_query.py b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/ams_query.py
deleted file mode 100644
index d51357a..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/ams_query.py
+++ /dev/null
@@ -1,209 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import urllib2
-import signal
-import sys
-import optparse
-import time
-
-# http://162.216.148.45:8188/ws/v1/timeline/metrics?
-# metricNames=rpc.rpc.RpcAuthenticationSuccesses
-# &appId=nodemanager&hostname=local.0&startTime=1414152029&endTime=1414155629
-
-AMS_URL = "http://{0}:8188/ws/v1/timeline/metrics?metricNames={1}&appid={" \
-          "2}&hostname={3}"
-
-# in fact this list could be generated automatically from Ambari UI queries
-host_metrics = {
-  'cpu': ['cpu_user', 'cpu_wio', 'cpu_nice', 'cpu_aidle', 'cpu_system', 'cpu_idle'],
-  'disk': ['disk_total', 'disk_free'],
-  'load': ['load_one', 'load_fifteen', 'load_five'],
-  'mem': ['swap_free', 'mem_shared', 'mem_free', 'mem_cached', 'mem_buffers'],
-  'network': ['bytes_in', 'bytes_out', 'pkts_in', 'pkts_out'],
-  'process': ['proc_total', 'proc_run']
-}
-
-# HDFS_SERVICE
-namenode_metrics = {
-  'dfs.Capacity': ['dfs.FSNamesystem.CapacityRemainingGB',
-                   'dfs.FSNamesystem.CapacityUsedGB',
-                   'dfs.FSNamesystem.CapacityTotalGB'],
-  'dfs.Replication': ['dfs.FSNamesystem.PendingReplicationBlocks',
-                      'dfs.FSNamesystem.UnderReplicatedBlocks'],
-  'dfs.File': ['dfs.namenode.FileInfoOps', 'dfs.namenode.CreateFileOps'],
-  'jvm.gc': ['jvm.JvmMetrics.GcTimeMillis'],
-  'jvm.mem': ['jvm.JvmMetrics.MemNonHeapUsedM',
-              'jvm.JvmMetrics.MemNonHeapCommittedM',
-              'jvm.JvmMetrics.MemHeapUsedM',
-              'jvm.JvmMetrics.MemHeapCommittedM'],
-  'jvm.thread': ['jvm.JvmMetrics.ThreadsRunnable',
-                 'jvm.JvmMetrics.ThreadsBlocked',
-                 'jvm.JvmMetrics.ThreadsWaiting',
-                 'jvm.JvmMetrics.ThreadsTimedWaiting'],
-  'rpc': ['rpc.rpc.RpcQueueTimeAvgTime']
-}
-
-all_metrics = {
-  'HOST': host_metrics,
-  'namenode': namenode_metrics
-}
-
-all_metrics_times = {}
-
-
-# hostnames = ['EPPLKRAW0101.0']  # 'local.0'
-# metrics_test_host = '162.216.150.247' # metricstest-100
-# metrics_test_host = '162.216.148.45'    # br-3
-# start_time = int(time.time())           # 1414425208
-
-
-def main(argv=None):
-  # Allow Ctrl-C
-  signal.signal(signal.SIGINT, signal_handler)
-
-  parser = optparse.OptionParser()
-
-  parser.add_option("-H", "--host", dest="host",
-                    help="AMS host")
-  parser.add_option("-t", "--starttime", dest="start_time_secs",
-                    default=int(time.time()),
-                    help="start time in seconds, default value is current time")
-  parser.add_option("-n", "--nodes", dest="node_names",
-                    help="nodes from cluster, used as a param to query for")
-  (options, args) = parser.parse_args()
-
-  if options.host is None:
-    print "AMS host name is required (--host or -h)"
-    exit(-1)
-
-  if options.node_names is None:
-    print "cluster nodes are required (--nodes or -n)"
-    exit(3)
-
-  global start_time_secs, metrics_test_host, hostnames
-
-  metrics_test_host = options.host
-  start_time_secs = int(options.start_time_secs)
-  hostnames = [options.node_names]
-
-  while True:
-    run()
-    time.sleep(15)
-    start_time_secs += 15
-
-
-def signal_handler(signal, frame):
-  print('Ctrl+C detected, exiting!')
-  print_all_metrics(all_metrics_times)
-  sys.exit(0)
-
-
-def run():
-  hostname = ','.join(hostnames)
-  qs = QuerySender(metrics_test_host, True)
-  for metric_name in all_metrics:
-    print
-    print 'Querying for ' + metric_name + ' metrics'
-    current_time_secs = start_time_secs
-    qs.query_all_app_metrics(hostname, metric_name,
-                             all_metrics[metric_name],
-                             current_time_secs)
-
-
-def add_query_metrics_for_app_id(app_id, metric_timing):
-  if not app_id in all_metrics_times:
-    all_metrics_times[app_id] = []
-  all_metrics_times[app_id].append(metric_timing)
-
-
-def print_all_metrics(metrics):
-  print 'Metrics Summary'
-  for app_id in sorted(metrics):
-    first = True
-    for single_query_metrics in metrics[app_id]:
-      print_app_metrics(app_id, single_query_metrics, first)
-      first = False
-
-
-def print_app_metrics(app_id, metric_timing, header=False):
-  #header
-  if header:
-    print app_id + ': ' + ','.join(sorted(metric_timing.keys()))
-  #vals
-  print app_id + ':',
-  for key in sorted(metric_timing):
-    print '%.3f,' % metric_timing[key],
-  print
-
-
-class QuerySender:
-  def __init__(self, metrics_address, print_responses=False):
-    self.metrics_address = metrics_address
-    self.print_responses = print_responses
-
-  def query_all_app_metrics(self, hostname, app_id, metrics, current_time_secs):
-    metric_timing = {}
-    for key in metrics:
-      print 'Getting metrics for', key
-      query_time = time.time()
-
-      metric_names = ','.join(metrics[key])
-      self.query(hostname, app_id, metric_names, current_time_secs)
-      query_time_elapsed = time.time() - query_time
-
-      print 'Query for "%s" took %s' % (key, query_time_elapsed)
-      metric_timing[key] = query_time_elapsed
-
-    add_query_metrics_for_app_id(app_id, metric_timing)
-    if self.print_responses:
-      print_app_metrics(app_id, metric_timing)
-
-  def query(self, hostname, app_id, metric_names, current_time_secs):
-    url = self.create_url(hostname, metric_names, app_id, current_time_secs)
-    print url
-    response = self.send(url)
-    if self.print_responses:
-      print response
-    pass
-
-  def send(self, url):
-    request = urllib2.Request(url)
-    try:
-      response = urllib2.urlopen(request, timeout=int(30))
-      response = response.read()
-      return response
-
-    except urllib2.URLError as e:
-      print e.reason
-
-  def create_url(self, hostname, metric_names, app_id, current_time_secs):
-    server = AMS_URL.format(self.metrics_address,
-                            metric_names,
-                            app_id,
-                            hostname)
-    t = current_time_secs
-    server += '&startTime=%s&endTime=%s' % (t, t + 3600)
-    return server
-
-
-if __name__ == '__main__':
-  main()
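
For reference, a single query of the kind ams_query.py issues can be
reproduced in a few lines. A minimal sketch against the AMS timeline
endpoint, with the host and node values below as placeholder assumptions:

import urllib2
import time

ams_host, node = 'ams.example.com', 'node1.example.com'  # placeholders
end = int(time.time())
url = ('http://%s:8188/ws/v1/timeline/metrics'
       '?metricNames=rpc.rpc.RpcAuthenticationSuccesses'
       '&appId=nodemanager&hostname=%s'
       '&startTime=%d&endTime=%d') % (ams_host, node, end - 3600, end)
print urllib2.urlopen(url, timeout=30).read()

The script itself wraps this in a 15-second polling loop and, on Ctrl+C,
prints a per-appId timing summary via print_all_metrics.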

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/start.sh
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/start.sh b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/start.sh
deleted file mode 100644
index 0507af1..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/start.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# set -x
-cd "$(dirname "$0")"
-
-METRICS_HOST=$1
-HOST_COUNT=$2
-
-echo $$ > sim_pid
-cat sim_pid
-#HOMEDIR
-exec  java -jar ../lib/ambari-metrics/ambari-metrics-hadoop-timelineservice-simulator*.jar  -h `hostname -f` -n ${HOST_COUNT} -m ${METRICS_HOST} -c 15000 -s 60000

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/start_slaves.sh
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/start_slaves.sh b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/start_slaves.sh
deleted file mode 100644
index e1e51c8..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/start_slaves.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-## set -x
-
-SLAVES=$1
-METRICS_HOST=$2
-HOST_COUNT=$3
-LOADSIM_HOMEDIR="LoadSimulator-1.0-SNAPSHOT"
-
-pdsh -w ${SLAVES} "$LOADSIM_HOMEDIR/start.sh ${METRICS_HOST} ${HOST_COUNT}"
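
start_slaves.sh relies on pdsh and passwordless ssh to each slave. For
environments without pdsh, a rough Python equivalent of the same fan-out
(host names and the home directory below are assumptions):

import subprocess

def start_slaves(slaves, metrics_host, host_count,
                 homedir='LoadSimulator-1.0-SNAPSHOT'):
    # Launch start.sh on each slave over ssh, mirroring the pdsh call above.
    for slave in slaves:
        cmd = '%s/start.sh %s %s' % (homedir, metrics_host, host_count)
        subprocess.call(['ssh', slave, cmd])

# start_slaves(['slave1.example.com', 'slave2.example.com'],
#              'ams.example.com', 20)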

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/status_slaves.sh
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/status_slaves.sh b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/status_slaves.sh
deleted file mode 100644
index 79787fd..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/status_slaves.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-## set -x
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/stop.sh
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/stop.sh b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/stop.sh
deleted file mode 100644
index 2220861..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/stop.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# set -x
-cd "$(dirname "$0")"
-read PID <./sim_pid
-
-FOUND_PID=`ps aux | grep simulator | grep $PID`
-if [ -z "${FOUND_PID}" ]
-then
-   echo "process simulator not fund under pid $PID"
-else
-   echo "process simulator running as $PID, killing"
-   kill ${PID}
-fi
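
stop.sh shows the usual pid-file pattern: start.sh records its pid
(echo $$ > sim_pid, kept valid by the exec), and stop.sh signals that pid
only if a matching process is still alive. A simplified Python sketch of
the same pattern; unlike the script, it does not grep the process table to
confirm the pid still belongs to the simulator:

import os
import signal

def stop_simulator(pid_file='sim_pid'):
    # Read the recorded pid and signal it; OSError means no such process.
    pid = int(open(pid_file).read().strip())
    try:
        os.kill(pid, signal.SIGTERM)
        print 'process simulator running as %d, killed' % pid
    except OSError:
        print 'process simulator not found under pid %d' % pid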

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/stop_slaves.sh
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/stop_slaves.sh b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/stop_slaves.sh
deleted file mode 100644
index 7cb567a..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/resources/scripts/stop_slaves.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# set -x
-
-SLAVES=$1
-LOADSIM_HOMEDIR="LoadSimulator-1.0-SNAPSHOT"
-
-pdsh -w ${SLAVES} "$LOADSIM_HOMEDIR/stop.sh"

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/conf/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/conf/ams-site.xml b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/conf/ams-site.xml
deleted file mode 100644
index b08609d..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/conf/ams-site.xml
+++ /dev/null
@@ -1,29 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<!-- Empty site, all defaults should be initialized in the code -->
-<configuration>
-  <property>
-    <name>test</name>
-    <value>testReady</value>
-  </property>
-</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/conf/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/conf/hadoop-policy.xml b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/conf/hadoop-policy.xml
deleted file mode 100644
index 41bde16..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/conf/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true">
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/conf/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/conf/hbase-site.xml b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/conf/hbase-site.xml
deleted file mode 100644
index c453900..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/conf/hbase-site.xml
+++ /dev/null
@@ -1,230 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.defaults.for.version.skip</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hbase.rootdir</name>
-    <value>file:///var/lib/hbase</value>
-    <description>
-      The AMS service uses HBase as its default storage backend. Set the
-      rootdir for HBase either to a local filesystem path, if using AMS in
-      embedded mode, or to an HDFS dir, for example:
-      hdfs://namenode.example.org:9000/hbase. By default HBase writes into
-      /tmp; change this configuration or all data will be lost on machine
-      restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value>/tmp</value>
-    <description>
-      Temporary directory on the local filesystem.
-      Change this setting to point to a location more permanent
-      than '/tmp' (The '/tmp' directory is often cleared on
-      machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.local.dir</name>
-    <value>${hbase.tmp.dir}/local</value>
-    <description>Directory on the local filesystem to be used as a local storage
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>
-      The mode the cluster will be in. Possible values are false for
-      standalone mode and true for distributed mode. If false, startup will run
-      all HBase and ZooKeeper daemons together in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.wait.on.regionservers.mintostart</name>
-    <value>1</value>
-    <description>
-      Ensure that the HBase Master waits for this many region servers to start.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-      By default this is set to localhost for local and pseudo-distributed modes
-      of operation. For a fully-distributed setup, this should be set to a full
-      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-      this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value>0.0.0.0</value>
-    <description>The bind address for the HBase Master web UI</description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>90010</value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>90030</value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>0</value>
-    <description>
-      The time (in milliseconds) between 'major' compactions of all
-      HStoreFiles in a region.
-      0 to disable automated major compactions.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.query.spoolThresholdBytes</name>
-    <value>12582912</value>
-    <description>
-      Threshold size in bytes after which results from queries executed in
-      parallel are spooled to disk. Default is 20 mb.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.dataDir</name>
-    <value>${hbase.tmp.dir}/zookeeper</value>
-    <description>
-      Property from ZooKeeper's config zoo.cfg.
-      The directory where the snapshot is stored.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>10000</value>
-    <description>
-      Number of rows that will be fetched when calling next on a scanner
-      if it is not served from (local, client) memory.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.3</value>
-    <description>
-      Percentage of maximum heap (-Xmx setting) to allocate to block cache
-      used by a StoreFile. Default of 0.4 means allocate 40%.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.5</value>
-    <description>
-      Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.4</value>
-    <description>
-      When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.groupby.maxCacheSize</name>
-    <value>307200000</value>
-    <description>
-      Size in bytes of pages cached during GROUP BY spilling. Default is 100Mb.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>4</value>
-    <description>
-      Block updates if memstore has hbase.hregion.memstore.block.multiplier
-      times hbase.hregion.memstore.flush.size bytes. Useful preventing runaway
-      memstore during spikes in update traffic.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.flusher.count</name>
-    <value>2</value>
-    <description>
-      The number of flush threads. With fewer threads, the MemStore flushes
-      will be queued. With more threads, the flushes will be executed in parallel,
-      increasing the load on HDFS, and potentially causing more compactions.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.query.timeoutMs</name>
-    <value>1200000</value>
-    <description>
-      Number of milliseconds after which a query will timeout on the client.
-      Default is 10 min.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.timeout.period</name>
-    <value>900000</value>
-    <description>
-      Client scanner lease period in milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.thread.compaction.large</name>
-    <value>2</value>
-    <description>
-      Configuration key for the large compaction threads.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.thread.compaction.small</name>
-    <value>3</value>
-    <description>
-      Configuration key for the small compaction threads.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>200</value>
-    <description>
-      If more than this number of StoreFiles exist in any one Store
-      (one StoreFile is written per flush of MemStore), updates are blocked for
-      this region until a compaction is completed, or until
-      hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-      Memstore will be flushed to disk if size of the memstore exceeds this
-      number of bytes. Value is checked by a thread that runs every
-      hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
deleted file mode 100644
index c41b8a7..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.applicationhistoryservice;
-
-import java.io.IOException;
-
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerState;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
-
-public class ApplicationHistoryStoreTestUtils {
-
-  protected ApplicationHistoryStore store;
-
-  protected void writeApplicationStartData(ApplicationId appId)
-      throws IOException {
-    store.applicationStarted(ApplicationStartData.newInstance(appId,
-      appId.toString(), "test type", "test queue", "test user", 0, 0));
-  }
-
-  protected void writeApplicationFinishData(ApplicationId appId)
-      throws IOException {
-    store.applicationFinished(ApplicationFinishData.newInstance(appId, 0,
-      appId.toString(), FinalApplicationStatus.UNDEFINED,
-      YarnApplicationState.FINISHED));
-  }
-
-  protected void writeApplicationAttemptStartData(
-      ApplicationAttemptId appAttemptId) throws IOException {
-    store.applicationAttemptStarted(ApplicationAttemptStartData.newInstance(
-      appAttemptId, appAttemptId.toString(), 0,
-      ContainerId.newInstance(appAttemptId, 1)));
-  }
-
-  protected void writeApplicationAttemptFinishData(
-      ApplicationAttemptId appAttemptId) throws IOException {
-    store.applicationAttemptFinished(ApplicationAttemptFinishData.newInstance(
-      appAttemptId, appAttemptId.toString(), "test tracking url",
-      FinalApplicationStatus.UNDEFINED, YarnApplicationAttemptState.FINISHED));
-  }
-
-  protected void writeContainerStartData(ContainerId containerId)
-      throws IOException {
-    store.containerStarted(ContainerStartData.newInstance(containerId,
-      Resource.newInstance(0, 0), NodeId.newInstance("localhost", 0),
-      Priority.newInstance(containerId.getId()), 0));
-  }
-
-  protected void writeContainerFinishData(ContainerId containerId)
-      throws IOException {
-    store.containerFinished(ContainerFinishData.newInstance(containerId, 0,
-      containerId.toString(), 0, ContainerState.COMPLETE));
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
deleted file mode 100644
index 6b06918..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.applicationhistoryservice;
-
-import java.io.IOException;
-import java.util.List;
-
-import junit.framework.Assert;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerReport;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-
-// Timeline service client support is not enabled for AMS
-@Ignore
-public class TestApplicationHistoryClientService extends
-    ApplicationHistoryStoreTestUtils {
-
-  ApplicationHistoryServer historyServer = null;
-  String expectedLogUrl = null;
-
-  @Before
-  public void setup() {
-    historyServer = new ApplicationHistoryServer();
-    Configuration config = new YarnConfiguration();
-    expectedLogUrl = WebAppUtils.getHttpSchemePrefix(config) +
-        WebAppUtils.getAHSWebAppURLWithoutScheme(config) +
-        "/applicationhistory/logs/localhost:0/container_0_0001_01_000001/" +
-        "container_0_0001_01_000001/test user";
-    config.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
-      MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
-    historyServer.init(config);
-    historyServer.start();
-    store =
-        ((ApplicationHistoryManagerImpl) historyServer.getApplicationHistory())
-          .getHistoryStore();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    historyServer.stop();
-  }
-
-  @Test
-  public void testApplicationReport() throws IOException, YarnException {
-    ApplicationId appId = null;
-    appId = ApplicationId.newInstance(0, 1);
-    writeApplicationStartData(appId);
-    writeApplicationFinishData(appId);
-    GetApplicationReportRequest request =
-        GetApplicationReportRequest.newInstance(appId);
-    GetApplicationReportResponse response =
-        historyServer.getClientService().getClientHandler()
-          .getApplicationReport(request);
-    ApplicationReport appReport = response.getApplicationReport();
-    Assert.assertNotNull(appReport);
-    Assert.assertEquals("application_0_0001", appReport.getApplicationId()
-      .toString());
-    Assert.assertEquals("test type", appReport.getApplicationType().toString());
-    Assert.assertEquals("test queue", appReport.getQueue().toString());
-  }
-
-  @Test
-  public void testApplications() throws IOException, YarnException {
-    ApplicationId appId = null;
-    appId = ApplicationId.newInstance(0, 1);
-    writeApplicationStartData(appId);
-    writeApplicationFinishData(appId);
-    ApplicationId appId1 = ApplicationId.newInstance(0, 2);
-    writeApplicationStartData(appId1);
-    writeApplicationFinishData(appId1);
-    GetApplicationsRequest request = GetApplicationsRequest.newInstance();
-    GetApplicationsResponse response =
-        historyServer.getClientService().getClientHandler()
-          .getApplications(request);
-    List<ApplicationReport> appReport = response.getApplicationList();
-    Assert.assertNotNull(appReport);
-    Assert.assertEquals(appId, appReport.get(0).getApplicationId());
-    Assert.assertEquals(appId1, appReport.get(1).getApplicationId());
-  }
-
-  @Test
-  public void testApplicationAttemptReport() throws IOException, YarnException {
-    ApplicationId appId = ApplicationId.newInstance(0, 1);
-    ApplicationAttemptId appAttemptId =
-        ApplicationAttemptId.newInstance(appId, 1);
-    writeApplicationAttemptStartData(appAttemptId);
-    writeApplicationAttemptFinishData(appAttemptId);
-    GetApplicationAttemptReportRequest request =
-        GetApplicationAttemptReportRequest.newInstance(appAttemptId);
-    GetApplicationAttemptReportResponse response =
-        historyServer.getClientService().getClientHandler()
-          .getApplicationAttemptReport(request);
-    ApplicationAttemptReport attemptReport =
-        response.getApplicationAttemptReport();
-    Assert.assertNotNull(attemptReport);
-    Assert.assertEquals("appattempt_0_0001_000001", attemptReport
-      .getApplicationAttemptId().toString());
-  }
-
-  @Test
-  public void testApplicationAttempts() throws IOException, YarnException {
-    ApplicationId appId = ApplicationId.newInstance(0, 1);
-    ApplicationAttemptId appAttemptId =
-        ApplicationAttemptId.newInstance(appId, 1);
-    ApplicationAttemptId appAttemptId1 =
-        ApplicationAttemptId.newInstance(appId, 2);
-    writeApplicationAttemptStartData(appAttemptId);
-    writeApplicationAttemptFinishData(appAttemptId);
-    writeApplicationAttemptStartData(appAttemptId1);
-    writeApplicationAttemptFinishData(appAttemptId1);
-    GetApplicationAttemptsRequest request =
-        GetApplicationAttemptsRequest.newInstance(appId);
-    GetApplicationAttemptsResponse response =
-        historyServer.getClientService().getClientHandler()
-          .getApplicationAttempts(request);
-    List<ApplicationAttemptReport> attemptReports =
-        response.getApplicationAttemptList();
-    Assert.assertNotNull(attemptReports);
-    Assert.assertEquals(appAttemptId, attemptReports.get(0)
-      .getApplicationAttemptId());
-    Assert.assertEquals(appAttemptId1, attemptReports.get(1)
-      .getApplicationAttemptId());
-  }
-
-  @Test
-  public void testContainerReport() throws IOException, YarnException {
-    ApplicationId appId = ApplicationId.newInstance(0, 1);
-    writeApplicationStartData(appId);
-    ApplicationAttemptId appAttemptId =
-        ApplicationAttemptId.newInstance(appId, 1);
-    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
-    writeContainerStartData(containerId);
-    writeContainerFinishData(containerId);
-    writeApplicationFinishData(appId);
-    GetContainerReportRequest request =
-        GetContainerReportRequest.newInstance(containerId);
-    GetContainerReportResponse response =
-        historyServer.getClientService().getClientHandler()
-          .getContainerReport(request);
-    ContainerReport container = response.getContainerReport();
-    Assert.assertNotNull(container);
-    Assert.assertEquals(containerId, container.getContainerId());
-    Assert.assertEquals(expectedLogUrl, container.getLogUrl());
-  }
-
-  @Test
-  public void testContainers() throws IOException, YarnException {
-    ApplicationId appId = ApplicationId.newInstance(0, 1);
-    writeApplicationStartData(appId);
-    ApplicationAttemptId appAttemptId =
-        ApplicationAttemptId.newInstance(appId, 1);
-    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
-    ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 2);
-    writeContainerStartData(containerId);
-    writeContainerFinishData(containerId);
-    writeContainerStartData(containerId1);
-    writeContainerFinishData(containerId1);
-    writeApplicationFinishData(appId);
-    GetContainersRequest request =
-        GetContainersRequest.newInstance(appAttemptId);
-    GetContainersResponse response =
-        historyServer.getClientService().getClientHandler()
-          .getContainers(request);
-    List<ContainerReport> containers = response.getContainerList();
-    Assert.assertNotNull(containers);
-    Assert.assertEquals(containerId, containers.get(1).getContainerId());
-    Assert.assertEquals(containerId1, containers.get(0).getContainerId());
-  }
-}
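
For reference, every test in the file removed above follows the same request/response pattern: build a YARN protocol record, dispatch it through the embedded client handler, and unwrap the report. A minimal sketch of that pattern, assuming the server is bound to the in-memory store exactly as in the deleted setup(); the class name HistoryClientSketch is illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ApplicationReport;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.MemoryApplicationHistoryStore;

    public class HistoryClientSketch {
      public static void main(String[] args) throws Exception {
        // Stand up the server with the in-memory store, as the deleted setup() does.
        ApplicationHistoryServer historyServer = new ApplicationHistoryServer();
        Configuration config = new YarnConfiguration();
        config.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
            MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
        historyServer.init(config);
        historyServer.start();
        try {
          // Build a protocol record and dispatch it through the client handler.
          // This assumes history data for application_0_0001 was already written
          // to the store; otherwise the handler reports the application as missing.
          ApplicationId appId = ApplicationId.newInstance(0, 1);
          ApplicationReport report = historyServer.getClientService()
              .getClientHandler()
              .getApplicationReport(GetApplicationReportRequest.newInstance(appId))
              .getApplicationReport();
          System.out.println(report.getApplicationId());
        } finally {
          historyServer.stop();
        }
      }
    }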

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java
deleted file mode 100644
index aad23d9..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.applicationhistoryservice;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-
-public class TestApplicationHistoryManagerImpl extends
-    ApplicationHistoryStoreTestUtils {
-  ApplicationHistoryManagerImpl applicationHistoryManagerImpl = null;
-
-  @Before
-  public void setup() throws Exception {
-    Configuration config = new Configuration();
-    config.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
-      MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
-    applicationHistoryManagerImpl = new ApplicationHistoryManagerImpl();
-    applicationHistoryManagerImpl.init(config);
-    applicationHistoryManagerImpl.start();
-    store = applicationHistoryManagerImpl.getHistoryStore();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    applicationHistoryManagerImpl.stop();
-  }
-
-  @Test
-  @Ignore
-  public void testApplicationReport() throws IOException, YarnException {
-    ApplicationId appId = null;
-    appId = ApplicationId.newInstance(0, 1);
-    writeApplicationStartData(appId);
-    writeApplicationFinishData(appId);
-    ApplicationAttemptId appAttemptId =
-        ApplicationAttemptId.newInstance(appId, 1);
-    writeApplicationAttemptStartData(appAttemptId);
-    writeApplicationAttemptFinishData(appAttemptId);
-    ApplicationReport appReport =
-        applicationHistoryManagerImpl.getApplication(appId);
-    Assert.assertNotNull(appReport);
-    Assert.assertEquals(appId, appReport.getApplicationId());
-    Assert.assertEquals(appAttemptId,
-      appReport.getCurrentApplicationAttemptId());
-    Assert.assertEquals(appAttemptId.toString(), appReport.getHost());
-    Assert.assertEquals("test type", appReport.getApplicationType().toString());
-    Assert.assertEquals("test queue", appReport.getQueue().toString());
-  }
-}
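
The deleted manager test relies on the store being pluggable: ApplicationHistoryManagerImpl instantiates whichever ApplicationHistoryStore implementation YarnConfiguration.APPLICATION_HISTORY_STORE names. A condensed sketch of that lifecycle, with error handling omitted and the class name illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManagerImpl;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.MemoryApplicationHistoryStore;

    public class ManagerLifecycleSketch {
      public static void main(String[] args) {
        Configuration config = new Configuration();
        // Bind the in-memory store implementation, exactly as setup() above does.
        config.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
            MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);

        ApplicationHistoryManagerImpl manager = new ApplicationHistoryManagerImpl();
        manager.init(config);  // reads the config and instantiates the store service
        manager.start();       // starts the store as part of the service tree
        try {
          ApplicationHistoryStore store = manager.getHistoryStore();
          // Queries such as manager.getApplication(appId) are served from this store.
          System.out.println(store.getClass().getName());
        } finally {
          manager.stop();      // also stops the composed store
        }
      }
    }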

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
deleted file mode 100644
index 3720852..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.applicationhistoryservice;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.service.Service.STATE;
-import org.apache.hadoop.util.ExitUtil;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
-  .timeline.DefaultPhoenixDataSource;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
-  .timeline.PhoenixHBaseAccessor;
-import org.apache.zookeeper.ClientCnxn;
-import org.easymock.EasyMock;
-import org.junit.*;
-import org.junit.rules.TemporaryFolder;
-import org.junit.runner.RunWith;
-import org.powermock.core.classloader.annotations.PowerMockIgnore;
-import org.powermock.core.classloader.annotations.PrepareForTest;
-import org.powermock.modules.junit4.PowerMockRunner;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.Statement;
-
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
-  .timeline.TimelineMetricConfiguration.METRICS_SITE_CONFIGURATION_FILE;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.*;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.junit.Assert.*;
-import static org.powermock.api.easymock.PowerMock.*;
-import static org.powermock.api.support.membermodification.MemberMatcher.method;
-import static org.powermock.api.support.membermodification.MemberModifier
-  .suppress;
-
-@RunWith(PowerMockRunner.class)
-@PrepareForTest({ PhoenixHBaseAccessor.class, UserGroupInformation.class,
-  ClientCnxn.class, DefaultPhoenixDataSource.class})
-@PowerMockIgnore( {"javax.management.*"})
-public class TestApplicationHistoryServer {
-
-  ApplicationHistoryServer historyServer = null;
-  Configuration metricsConf = null;
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Before
-  @SuppressWarnings("all")
-  public void setup() throws URISyntaxException, IOException {
-    folder.create();
-    File hbaseSite = folder.newFile("hbase-site.xml");
-    File amsSite = folder.newFile("ams-site.xml");
-
-    FileUtils.writeStringToFile(hbaseSite, "<configuration>\n" +
-      "  <property>\n" +
-      "    <name>hbase.defaults.for.version.skip</name>\n" +
-      "    <value>true</value>\n" +
-      "  </property>" +
-      "  <property> " +
-      "    <name>hbase.zookeeper.quorum</name>\n" +
-      "    <value>localhost</value>\n" +
-      "  </property>" +
-      "</configuration>");
-
-    FileUtils.writeStringToFile(amsSite, "<configuration>\n" +
-      "  <property>\n" +
-      "    <name>test</name>\n" +
-      "    <value>testReady</value>\n" +
-      "  </property>\n" +
-      "  <property>\n" +
-      "    <name>timeline.metrics.host.aggregator.hourly.disabled</name>\n" +
-      "    <value>true</value>\n" +
-      "    <description>\n" +
-      "      Disable host based hourly aggregations.\n" +
-      "    </description>\n" +
-      "  </property>\n" +
-      "  <property>\n" +
-      "    <name>timeline.metrics.host.aggregator.minute.disabled</name>\n" +
-      "    <value>true</value>\n" +
-      "    <description>\n" +
-      "      Disable host based minute aggregations.\n" +
-      "    </description>\n" +
-      "  </property>\n" +
-      "  <property>\n" +
-      "    <name>timeline.metrics.cluster.aggregator.hourly.disabled</name>\n" +
-      "    <value>true</value>\n" +
-      "    <description>\n" +
-      "      Disable cluster based hourly aggregations.\n" +
-      "    </description>\n" +
-      "  </property>\n" +
-      "  <property>\n" +
-      "    <name>timeline.metrics.cluster.aggregator.minute.disabled</name>\n" +
-      "    <value>true</value>\n" +
-      "    <description>\n" +
-      "      Disable cluster based minute aggregations.\n" +
-      "    </description>\n" +
-      "  </property>" +
-      "</configuration>");
-
-    ClassLoader currentClassLoader = Thread.currentThread().getContextClassLoader();
-
-    // Add the conf dir to the classpath
-    // Chain the current thread classloader
-    URLClassLoader urlClassLoader = null;
-    try {
-      urlClassLoader = new URLClassLoader(new URL[] {
-        folder.getRoot().toURI().toURL() }, currentClassLoader);
-    } catch (MalformedURLException e) {
-      e.printStackTrace();
-    }
-
-    Thread.currentThread().setContextClassLoader(urlClassLoader);
-    metricsConf = new Configuration(false);
-    metricsConf.addResource(Thread.currentThread().getContextClassLoader()
-      .getResource(METRICS_SITE_CONFIGURATION_FILE).toURI().toURL());
-    assertNotNull(metricsConf.get("test"));
-  }
-
-  // Simple init/start/stop test for ApplicationHistoryServer; the service state should transition accordingly.
-  @Test(timeout = 50000)
-  public void testStartStopServer() throws Exception {
-    Configuration config = new YarnConfiguration();
-    UserGroupInformation ugi =
-      UserGroupInformation.createUserForTesting("ambari", new String[] {"ambari"});
-
-    mockStatic(UserGroupInformation.class);
-    expect(UserGroupInformation.getCurrentUser()).andReturn(ugi).anyTimes();
-    expect(UserGroupInformation.isSecurityEnabled()).andReturn(false).anyTimes();
-    config.set(YarnConfiguration.APPLICATION_HISTORY_STORE,
-      "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore");
-
-    Connection connection = createNiceMock(Connection.class);
-    Statement stmt = createNiceMock(Statement.class);
-    mockStatic(DriverManager.class);
-    expect(DriverManager.getConnection("jdbc:phoenix:localhost:2181:/hbase"))
-      .andReturn(connection).anyTimes();
-    expect(connection.createStatement()).andReturn(stmt).anyTimes();
-    suppress(method(Statement.class, "executeUpdate", String.class));
-    connection.close();
-    expectLastCall();
-
-    EasyMock.replay(connection, stmt);
-    replayAll();
-
-    historyServer = new ApplicationHistoryServer();
-    historyServer.init(config);
-
-    verifyAll();
-
-    assertEquals(STATE.INITED, historyServer.getServiceState());
-    assertEquals(4, historyServer.getServices().size());
-    ApplicationHistoryClientService historyService =
-      historyServer.getClientService();
-    assertNotNull(historyServer.getClientService());
-    assertEquals(STATE.INITED, historyService.getServiceState());
-
-    historyServer.start();
-    assertEquals(STATE.STARTED, historyServer.getServiceState());
-    assertEquals(STATE.STARTED, historyService.getServiceState());
-    historyServer.stop();
-    assertEquals(STATE.STOPPED, historyServer.getServiceState());
-  }
-
-  // test launch method
-  @Ignore
-  @Test(timeout = 60000)
-  public void testLaunch() throws Exception {
-
-    UserGroupInformation ugi =
-      UserGroupInformation.createUserForTesting("ambari", new String[]{"ambari"});
-    mockStatic(UserGroupInformation.class);
-    expect(UserGroupInformation.getCurrentUser()).andReturn(ugi).anyTimes();
-    expect(UserGroupInformation.isSecurityEnabled()).andReturn(false).anyTimes();
-
-    ExitUtil.disableSystemExit();
-    try {
-      historyServer = ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
-    } catch (ExitUtil.ExitException e) {
-      assertEquals(0, e.status);
-      ExitUtil.resetFirstExitException();
-      fail();
-    }
-  }
-
-  @After
-  public void stop() {
-    if (historyServer != null) {
-      historyServer.stop();
-    }
-  }
-}
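
TestApplicationHistoryServer (removed above) leans on PowerMock to stub static methods such as UserGroupInformation.getCurrentUser() so the server can be initialized without a live HBase/Phoenix backend. Its record/replay/verify cycle reduces to the following minimal sketch; the class and test names are illustrative, and the same PowerMock/EasyMock test dependencies are assumed:

    import static org.easymock.EasyMock.expect;
    import static org.junit.Assert.assertFalse;
    import static org.powermock.api.easymock.PowerMock.mockStatic;
    import static org.powermock.api.easymock.PowerMock.replayAll;
    import static org.powermock.api.easymock.PowerMock.verifyAll;

    import org.apache.hadoop.security.UserGroupInformation;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.powermock.core.classloader.annotations.PrepareForTest;
    import org.powermock.modules.junit4.PowerMockRunner;

    @RunWith(PowerMockRunner.class)
    @PrepareForTest(UserGroupInformation.class)
    public class StaticMockingSketchTest {

      @Test
      public void securityCheckIsStubbed() throws Exception {
        // Record: intercept the static call and script its answer.
        mockStatic(UserGroupInformation.class);
        expect(UserGroupInformation.isSecurityEnabled()).andReturn(false).anyTimes();

        // Replay: switch every prepared static into replay mode.
        replayAll();

        // Any code under test now sees security as disabled.
        assertFalse(UserGroupInformation.isSecurityEnabled());

        // Verify: assert the recorded expectations were satisfied.
        verifyAll();
      }
    }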

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba3d6926/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
deleted file mode 100644
index bc16d36..0000000
--- a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.applicationhistoryservice;
-
-import java.io.IOException;
-import java.net.URI;
-
-import junit.framework.Assert;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestFileSystemApplicationHistoryStore extends
-    ApplicationHistoryStoreTestUtils {
-
-  private FileSystem fs;
-  private Path fsWorkingPath;
-
-  @Before
-  public void setup() throws Exception {
-    fs = new RawLocalFileSystem();
-    Configuration conf = new Configuration();
-    fs.initialize(new URI("/"), conf);
-    fsWorkingPath = new Path("Test");
-    fs.delete(fsWorkingPath, true);
-    conf.set(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI, fsWorkingPath.toString());
-    store = new FileSystemApplicationHistoryStore();
-    store.init(conf);
-    store.start();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    store.stop();
-    fs.delete(fsWorkingPath, true);
-    fs.close();
-  }
-
-  @Test
-  public void testReadWriteHistoryData() throws IOException {
-    testWriteHistoryData(5);
-    testReadHistoryData(5);
-  }
-
-  private void testWriteHistoryData(int num) throws IOException {
-    testWriteHistoryData(num, false, false);
-  }
-  
-  private void testWriteHistoryData(
-      int num, boolean missingContainer, boolean missingApplicationAttempt)
-          throws IOException {
-    // write application history data
-    for (int i = 1; i <= num; ++i) {
-      ApplicationId appId = ApplicationId.newInstance(0, i);
-      writeApplicationStartData(appId);
-
-      // write application attempt history data
-      for (int j = 1; j <= num; ++j) {
-        ApplicationAttemptId appAttemptId =
-            ApplicationAttemptId.newInstance(appId, j);
-        writeApplicationAttemptStartData(appAttemptId);
-
-        if (missingApplicationAttempt && j == num) {
-          continue;
-        }
-        // write container history data
-        for (int k = 1; k <= num; ++k) {
-          ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
-          writeContainerStartData(containerId);
-          if (missingContainer && k == num) {
-            continue;
-          }
-          writeContainerFinishData(containerId);
-        }
-        writeApplicationAttemptFinishData(appAttemptId);
-      }
-      writeApplicationFinishData(appId);
-    }
-  }
-
-  private void testReadHistoryData(int num) throws IOException {
-    testReadHistoryData(num, false, false);
-  }
-  
-  private void testReadHistoryData(
-      int num, boolean missingContainer, boolean missingApplicationAttempt)
-          throws IOException {
-    // read application history data
-    Assert.assertEquals(num, store.getAllApplications().size());
-    for (int i = 1; i <= num; ++i) {
-      ApplicationId appId = ApplicationId.newInstance(0, i);
-      ApplicationHistoryData appData = store.getApplication(appId);
-      Assert.assertNotNull(appData);
-      Assert.assertEquals(appId.toString(), appData.getApplicationName());
-      Assert.assertEquals(appId.toString(), appData.getDiagnosticsInfo());
-
-      // read application attempt history data
-      Assert.assertEquals(num, store.getApplicationAttempts(appId).size());
-      for (int j = 1; j <= num; ++j) {
-        ApplicationAttemptId appAttemptId =
-            ApplicationAttemptId.newInstance(appId, j);
-        ApplicationAttemptHistoryData attemptData =
-            store.getApplicationAttempt(appAttemptId);
-        Assert.assertNotNull(attemptData);
-        Assert.assertEquals(appAttemptId.toString(), attemptData.getHost());
-        
-        if (missingApplicationAttempt && j == num) {
-          Assert.assertNull(attemptData.getDiagnosticsInfo());
-          continue;
-        } else {
-          Assert.assertEquals(appAttemptId.toString(),
-              attemptData.getDiagnosticsInfo());
-        }
-
-        // read container history data
-        Assert.assertEquals(num, store.getContainers(appAttemptId).size());
-        for (int k = 1; k <= num; ++k) {
-          ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
-          ContainerHistoryData containerData = store.getContainer(containerId);
-          Assert.assertNotNull(containerData);
-          Assert.assertEquals(Priority.newInstance(containerId.getId()),
-            containerData.getPriority());
-          if (missingContainer && k == num) {
-            Assert.assertNull(containerData.getDiagnosticsInfo());
-          } else {
-            Assert.assertEquals(containerId.toString(),
-                containerData.getDiagnosticsInfo());
-          }
-        }
-        ContainerHistoryData masterContainer =
-            store.getAMContainer(appAttemptId);
-        Assert.assertNotNull(masterContainer);
-        Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1),
-          masterContainer.getContainerId());
-      }
-    }
-  }
-
-  @Test
-  public void testWriteAfterApplicationFinish() throws IOException {
-    ApplicationId appId = ApplicationId.newInstance(0, 1);
-    writeApplicationStartData(appId);
-    writeApplicationFinishData(appId);
-    // write application attempt history data
-    ApplicationAttemptId appAttemptId =
-        ApplicationAttemptId.newInstance(appId, 1);
-    try {
-      writeApplicationAttemptStartData(appAttemptId);
-      Assert.fail();
-    } catch (IOException e) {
-      Assert.assertTrue(e.getMessage().contains("is not opened"));
-    }
-    try {
-      writeApplicationAttemptFinishData(appAttemptId);
-      Assert.fail();
-    } catch (IOException e) {
-      Assert.assertTrue(e.getMessage().contains("is not opened"));
-    }
-    // write container history data
-    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
-    try {
-      writeContainerStartData(containerId);
-      Assert.fail();
-    } catch (IOException e) {
-      Assert.assertTrue(e.getMessage().contains("is not opened"));
-    }
-    try {
-      writeContainerFinishData(containerId);
-      Assert.fail();
-    } catch (IOException e) {
-      Assert.assertTrue(e.getMessage().contains("is not opened"));
-    }
-  }
-
-  @Test
-  public void testMassiveWriteContainerHistoryData() throws IOException {
-    long mb = 1024 * 1024;
-    long usedDiskBefore = fs.getContentSummary(fsWorkingPath).getLength() / mb;
-    ApplicationId appId = ApplicationId.newInstance(0, 1);
-    writeApplicationStartData(appId);
-    ApplicationAttemptId appAttemptId =
-        ApplicationAttemptId.newInstance(appId, 1);
-    for (int i = 1; i <= 100000; ++i) {
-      ContainerId containerId = ContainerId.newInstance(appAttemptId, i);
-      writeContainerStartData(containerId);
-      writeContainerFinishData(containerId);
-    }
-    writeApplicationFinishData(appId);
-    long usedDiskAfter = fs.getContentSummary(fsWorkingPath).getLength() / mb;
-    Assert.assertTrue((usedDiskAfter - usedDiskBefore) < 20);
-  }
-
-  @Test
-  public void testMissingContainerHistoryData() throws IOException {
-    testWriteHistoryData(3, true, false);
-    testReadHistoryData(3, true, false);
-  }
-  
-  @Test
-  public void testMissingApplicationAttemptHistoryData() throws IOException {
-    testWriteHistoryData(3, false, true);
-    testReadHistoryData(3, false, true);
-  }
-}
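
The last file above drives the store through a write-once protocol: start data opens an application's history file, finish data closes it, and any later write for that application is rejected. A condensed sketch of that contract, assuming the same package and the same ApplicationHistoryStoreTestUtils helpers the deleted tests inherit, with the store initialized as in the deleted setup(); the class name is illustrative only:

    import java.io.IOException;

    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ContainerId;

    public class WriteOrderingSketch extends ApplicationHistoryStoreTestUtils {

      void demonstrateWriteOrdering() throws IOException {
        ApplicationId appId = ApplicationId.newInstance(0, 1);
        writeApplicationStartData(appId);   // opens the per-application history file
        writeApplicationFinishData(appId);  // writes finish data and closes the file

        // Once the file is closed, FileSystemApplicationHistoryStore rejects
        // further writes for that application with an "is not opened" IOException,
        // which is exactly what testWriteAfterApplicationFinish asserts.
        ContainerId containerId = ContainerId.newInstance(
            ApplicationAttemptId.newInstance(appId, 1), 1);
        try {
          writeContainerStartData(containerId);
        } catch (IOException expected) {
          // expected.getMessage() contains "is not opened"
        }
      }
    }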