Posted to commits@ambari.apache.org by dm...@apache.org on 2014/01/31 19:50:03 UTC

[1/4] AMBARI-4358. Add stack extension support for pluggable services (dlysnichenko)

Updated Branches:
  refs/heads/trunk f2146a41e -> 37f11ebda


http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HDFS/package/dummy-script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HDFS/package/dummy-script.py b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HDFS/package/dummy-script.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HDFS/package/dummy-script.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file


[3/4] AMBARI-4358. Add stack extension support for pluggable services (dlysnichenko)

Posted by dm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_namenode.py
deleted file mode 100644
index ecda794..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_namenode.py
+++ /dev/null
@@ -1,180 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-import urlparse
-
-
-def namenode(action=None, format=True):
-  import params
-  # we need this directory to be present before any action (HA manual steps
-  # for the additional namenode)
-  if action == "configure":
-    create_name_dirs(params.dfs_name_dir)
-
-  if action == "start":
-    if format:
-      format_namenode()
-    service(
-      action="start", name="namenode", user=params.hdfs_user,
-      keytab=params.dfs_namenode_keytab_file,
-      create_pid_dir=True,
-      create_log_dir=True,
-      principal=params.dfs_namenode_kerberos_principal
-    )
-
-    # TODO: extract creating of dirs to different services
-    create_app_directories()
-    create_user_directories()
-
-  if action == "stop":
-    service(
-      action="stop", name="namenode", user=params.hdfs_user,
-      keytab=params.dfs_namenode_keytab_file,
-      principal=params.dfs_namenode_kerberos_principal
-    )
-
-
-def create_name_dirs(directories):
-  import params
-
-  dirs = directories.split(",")
-  Directory(dirs,
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-
-def create_app_directories():
-  import params
-
-  hdfs_directory(name="/tmp",
-                 owner=params.hdfs_user,
-                 mode="777"
-  )
-  #mapred directories
-  if params.has_historyserver:
-    hdfs_directory(name="/mapred",
-                   owner=params.mapred_user
-    )
-    hdfs_directory(name="/mapred/system",
-                   owner=params.hdfs_user
-    )
-    #hbase directories
-  if len(params.hbase_master_hosts) != 0:
-    hdfs_directory(name=params.hbase_hdfs_root_dir,
-                   owner=params.hbase_user
-    )
-    hdfs_directory(name=params.hbase_staging_dir,
-                   owner=params.hbase_user,
-                   mode="711"
-    )
-    #hive directories
-  if len(params.hive_server_host) != 0:
-    hdfs_directory(name=params.hive_apps_whs_dir,
-                   owner=params.hive_user,
-                   mode="777"
-    )
-  if len(params.hcat_server_hosts) != 0:
-    hdfs_directory(name=params.webhcat_apps_dir,
-                   owner=params.webhcat_user,
-                   mode="755"
-    )
-  if len(params.hs_host) != 0:
-    if params.yarn_log_aggregation_enabled:
-      hdfs_directory(name=params.yarn_nm_app_log_dir,
-                     owner=params.yarn_user,
-                     group=params.user_group,
-                     mode="777",
-                     recursive_chmod=True
-      )
-    hdfs_directory(name=params.mapreduce_jobhistory_intermediate_done_dir,
-                   owner=params.mapred_user,
-                   group=params.user_group,
-                   mode="777"
-    )
-
-    hdfs_directory(name=params.mapreduce_jobhistory_done_dir,
-                   owner=params.mapred_user,
-                   group=params.user_group,
-                   mode="1777"
-    )
-
-
-
-def create_user_directories():
-  import params
-
-  hdfs_directory(name=params.smoke_hdfs_user_dir,
-                 owner=params.smoke_user,
-                 mode=params.smoke_hdfs_user_mode
-  )
-
-  if params.has_hive_server_host:
-    hdfs_directory(name=params.hive_hdfs_user_dir,
-                   owner=params.hive_user,
-                   mode=params.hive_hdfs_user_mode
-    )
-
-  if params.has_hcat_server_host:
-    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
-      hdfs_directory(name=params.hcat_hdfs_user_dir,
-                     owner=params.hcat_user,
-                     mode=params.hcat_hdfs_user_mode
-      )
-    hdfs_directory(name=params.webhcat_hdfs_user_dir,
-                   owner=params.webhcat_user,
-                   mode=params.webhcat_hdfs_user_mode
-    )
-
-  if params.has_oozie_server:
-    hdfs_directory(name=params.oozie_hdfs_user_dir,
-                   owner=params.oozie_user,
-                   mode=params.oozie_hdfs_user_mode
-    )
-
-
-def format_namenode(force=None):
-  import params
-
-  mark_dir = params.namenode_formatted_mark_dir
-  dfs_name_dir = params.dfs_name_dir
-  hdfs_user = params.hdfs_user
-  hadoop_conf_dir = params.hadoop_conf_dir
-
-  if not params.dfs_ha_enabled:
-    if force:
-      ExecuteHadoop('namenode -format',
-                    kinit_override=True)
-    else:
-      File('/tmp/checkForFormat.sh',
-           content=StaticFile("checkForFormat.sh"),
-           mode=0755)
-      Execute(format(
-        "sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
-        "{dfs_name_dir}"),
-              not_if=format("test -d {mark_dir}"),
-              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
-    Execute(format("mkdir -p {mark_dir}"))

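For context on the format() calls above (e.g. format("test -d {mark_dir}")):
the resource_management helper fills {name} placeholders from the caller's
scope rather than from explicit arguments. A minimal sketch of that behavior,
as a simplified standalone reimplementation rather than the library's actual
code:

    import inspect

    def format(template):
        # Resolve {name} placeholders against the caller's globals and locals.
        caller = inspect.currentframe().f_back
        names = dict(caller.f_globals)
        names.update(caller.f_locals)
        return template.format(**names)

    mark_dir = "/var/run/hadoop/hdfs/namenode/formatted"
    print(format("test -d {mark_dir}"))  # -> test -d /var/run/hadoop/hdfs/namenode/formatted
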
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_snamenode.py
deleted file mode 100644
index a943455..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_snamenode.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-
-
-def snamenode(action=None, format=False):
-  import params
-
-  if action == "configure":
-    Directory(params.fs_checkpoint_dir,
-              recursive=True,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group)
-  elif action == "start":
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_secondary_namenode_keytab_file,
-      principal=params.dfs_secondary_namenode_kerberos_principal
-    )
-  elif action == "stop":
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_secondary_namenode_keytab_file,
-      principal=params.dfs_secondary_namenode_kerberos_principal
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/journalnode.py
deleted file mode 100644
index fd355cc..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/journalnode.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class JournalNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    service(
-      action="start", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_journalnode_keytab_file,
-      principal=params.dfs_journalnode_kerberos_principal
-    )
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    service(
-      action="stop", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_journalnode_keytab_file,
-      principal=params.dfs_journalnode_kerberos_principal
-    )
-
-  def config(self, env):
-    import params
-
-    Directory(params.jn_edits_dir,
-              recursive=True,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.journalnode_pid_file)
-
-
-if __name__ == "__main__":
-  JournalNode().execute()

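The JournalNode().execute() entry point follows the Script lifecycle pattern
every component here uses: the agent names a command (install, start, stop,
status) and the base class dispatches it to the same-named method. A minimal
sketch of that dispatch, assuming a simplified base class rather than
resource_management's actual Script:

    class Script(object):
        def execute(self, command, env=None):
            # Dispatch the agent-requested command to the same-named method.
            getattr(self, command)(env)

    class JournalNodeDemo(Script):
        def start(self, env):
            print("starting journalnode")

    JournalNodeDemo().execute("start")  # -> starting journalnode
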
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/namenode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/namenode.py
deleted file mode 100644
index c799415..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/namenode.py
+++ /dev/null
@@ -1,61 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_namenode import namenode
-
-
-class NameNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    # TODO: remove once the configure action is implemented
-    self.configure(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    namenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="stop")
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="configure")
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.namenode_pid_file)
-
-
-if __name__ == "__main__":
-  NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/params.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/params.py
deleted file mode 100644
index 568a8a8..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/params.py
+++ /dev/null
@@ -1,180 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-import os
-
-config = Script.get_config()
-
-#security params
-security_enabled = config['configurations']['global']['security_enabled']
-dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
-dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.secondary.namenode.keytab.file']
-dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-
-dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
-dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
-dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
-dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
-dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
-
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-nm_host = default("/clusterHostInfo/nm_host", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
-
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_historyserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(nagios_server_hosts)  == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_journalnode_hosts = not len(journalnode_hosts)  == 0
-has_zkfc_hosts = not len(zkfc_hosts)  == 0
-
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-#users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = status_params.hdfs_user
-
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
-smoke_user_group = "users"
-
-#hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-hadoop_bin = "/usr/lib/hadoop/sbin"
-
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-
-dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
-dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
-
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-
-jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']#"/grid/0/hdfs/journal"
-
-# if stack_version[0] == "2":
-dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
-# else:
-#   dfs_name_dir = default("/configurations/hdfs-site/dfs.name.dir","/tmp/hadoop-hdfs/dfs/name")
-
-namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
-namenode_dirs_stub_filename = "namenode_dirs_created"
-
-hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']#","/apps/hbase/data")
-hbase_staging_dir = "/apps/hbase/staging"
-hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"] #, "/apps/hive/warehouse")
-webhcat_apps_dir = "/apps/webhcat"
-yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']#","true")
-yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']#","/app-logs")
-mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']#","/app-logs")
-mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']#","/mr-history/done")
-
-if has_oozie_server:
-  oozie_hdfs_user_dir = format("/user/{oozie_user}")
-  oozie_hdfs_user_mode = 775
-if has_hcat_server_host:
-  hcat_hdfs_user_dir = format("/user/{hcat_user}")
-  hcat_hdfs_user_mode = 755
-  webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
-  webhcat_hdfs_user_mode = 755
-if has_hive_server_host:
-  hive_hdfs_user_dir = format("/user/{hive_user}")
-  hive_hdfs_user_mode = 700
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 770
-
-namenode_formatted_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
-
-# if stack_version[0] == "2":
-fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir'] #","/tmp/hadoop-hdfs/dfs/namesecondary")
-# else:
-#   fs_checkpoint_dir = default("/configurations/core-site/fs.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
-
-# if stack_version[0] == "2":
-dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']#,"/tmp/hadoop-hdfs/dfs/data")
-# else:
-#   dfs_data_dir = default('/configurations/hdfs-site/dfs.data.dir',"/tmp/hadoop-hdfs/dfs/data")
-
-# HDFS High Availability properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namenode_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-if dfs_ha_enabled:
-  # initialize before the loop so a found id is not clobbered afterwards
-  namenode_id = None
-  for nn_id in dfs_ha_namenode_ids_list:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-
-journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
-if journalnode_address:
-  journalnode_port = journalnode_address.split(":")[1]
-

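The default(...) lookups above take a '/'-separated path into the command JSON
plus a fallback for absent keys. A hedged sketch of those semantics, as a
simplified standalone version (the real helper reads Script.get_config()):

    def default(path, fallback, config):
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    cfg = {"clusterHostInfo": {"rm_host": ["rm1.example.com"]}}
    print(default("/clusterHostInfo/rm_host", [], cfg))     # ['rm1.example.com']
    print(default("/clusterHostInfo/zkfc_hosts", [], cfg))  # []
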
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/service_check.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/service_check.py
deleted file mode 100644
index d27b13a..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/service_check.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-class HdfsServiceCheck(Script):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    unique = get_unique_id_and_date()
-    dir = '/tmp'
-    tmp_file = format("{dir}/{unique}")
-
-    safemode_command = "dfsadmin -safemode get | grep OFF"
-
-    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod -R 777 {dir}")
-    test_dir_exists = format("hadoop fs -test -e {dir}")
-    cleanup_cmd = format("fs -rm {tmp_file}")
-    # cleanup is folded into create_file_cmd to handle retries; a retry would
-    # otherwise hit a stale file; the exit code is that of the second command
-    create_file_cmd = format(
-      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
-    test_cmd = format("fs -test -e {tmp_file}")
-    if params.security_enabled:
-      Execute(format(
-        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
-        "{smoke_user}'"))
-    ExecuteHadoop(safemode_command,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(create_dir_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  not_if=test_dir_exists,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(create_file_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(test_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    if params.has_journalnode_hosts:
-      journalnode_port = params.journalnode_port
-      smoke_test_user = params.smoke_user
-      checkWebUIFileName = "checkWebUI.py"
-      checkWebUIFilePath = format("/tmp/{checkWebUIFileName}")
-      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-      checkWebUICmd = format(
-        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
-        "{comma_sep_jn_hosts} -p {journalnode_port}'")
-      File(checkWebUIFilePath,
-           content=StaticFile(checkWebUIFileName))
-
-      Execute(checkWebUICmd,
-              logoutput=True,
-              try_sleep=3,
-              tries=5
-      )
-
-    if params.is_namenode_master:
-      if params.has_zkfc_hosts:
-        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-        check_zkfc_process_cmd = format(
-          "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-        Execute(check_zkfc_process_cmd,
-                logoutput=True,
-                try_sleep=3,
-                tries=5
-        )
-
-
-if __name__ == "__main__":
-  HdfsServiceCheck().execute()

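The tries/try_sleep arguments above give every check a retry budget. A minimal
sketch of that contract, assuming a simplified runner rather than
ExecuteHadoop itself:

    import time

    def run_with_retries(cmd, tries=5, try_sleep=3):
        # Re-run cmd (a callable returning an exit code) until it succeeds
        # or the retry budget is exhausted.
        for attempt in range(1, tries + 1):
            if cmd() == 0:
                return
            if attempt < tries:
                time.sleep(try_sleep)
        raise RuntimeError("command failed after %d tries" % tries)
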
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/snamenode.py
deleted file mode 100644
index 8f682ec..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/snamenode.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_snamenode import snamenode
-
-
-class SNameNode(Script):
-  def install(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.install_packages(env)
-
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.config(env)
-    snamenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-    snamenode(action="stop")
-
-  def config(self, env):
-    import params
-
-    env.set_params(params)
-
-    snamenode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.snamenode_pid_file)
-
-
-if __name__ == "__main__":
-  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/status_params.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/status_params.py
deleted file mode 100644
index 4097373..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/status_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['global']['hdfs_user']
-hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")

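The pid files defined above are what check_process_status() probes in each
component's status() method. A hedged sketch of that probe (illustrative, not
the library's implementation): read the pid and send signal 0, which tests
existence without actually signaling the process.

    import os

    def process_is_live(pid_file):
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0: existence/permission check only
            return True
        except (IOError, OSError, ValueError):
            return False
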
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/utils.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/utils.py
deleted file mode 100644
index 225cd2e..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/utils.py
+++ /dev/null
@@ -1,138 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def service(action=None, name=None, user=None, create_pid_dir=False,
-            create_log_dir=False, keytab=None, principal=None):
-  import params
-
-  kinit_cmd = "true"
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  log_dir = format("{hdfs_log_dir_prefix}/{user}")
-  hadoop_daemon = format(
-    "export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
-    "{hadoop_bin}/hadoop-daemon.sh")
-  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
-
-  if create_pid_dir:
-    Directory(pid_dir,
-              owner=user,
-              recursive=True)
-  if create_log_dir:
-    Directory(log_dir,
-              owner=user,
-              recursive=True)
-
-  if params.security_enabled:
-    principal_replaced = principal.replace("_HOST", params.hostname)
-    kinit_cmd = format("kinit -kt {keytab} {principal_replaced}")
-
-    if name == "datanode":
-      user = "root"
-      pid_file = format(
-        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
-
-  daemon_cmd = format("{cmd} {action} {name}")
-
-  service_is_up = format(
-    "ls {pid_file} >/dev/null 2>&1 &&"
-    " ps `cat {pid_file}` >/dev/null 2>&1") if action == "start" else None
-
-  Execute(kinit_cmd)
-  Execute(daemon_cmd,
-          user = user,
-          not_if=service_is_up
-  )
-  if action == "stop":
-    File(pid_file,
-         action="delete",
-         ignore_failures=True
-    )
-
-
-def hdfs_directory(name=None, owner=None, group=None,
-                   mode=None, recursive_chown=False, recursive_chmod=False):
-  import params
-
-  dir_exists = format("hadoop fs -ls {name} >/dev/null 2>&1")
-  namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
-
-  stub_dir = params.namenode_dirs_created_stub_dir
-  stub_filename = params.namenode_dirs_stub_filename
-  dir_absent_in_stub = format(
-    "grep -q '^{name}$' {stub_dir}/{stub_filename} > /dev/null 2>&1; test $? -ne 0")
-  record_dir_in_stub = format("echo '{name}' >> {stub_dir}/{stub_filename}")
-  tries = 3
-  try_sleep = 10
-  dfs_check_nn_status_cmd = "true"
-
-  if params.dfs_ha_enabled:
-    namenode_id = params.namenode_id
-    dfs_check_nn_status_cmd = format(
-      "hdfs haadmin -getServiceState {namenode_id} | grep active > /dev/null")
-
-  #if params.stack_version[0] == "2":
-  mkdir_cmd = format("fs -mkdir -p {name}")
-  #else:
-  #  mkdir_cmd = format("fs -mkdir {name}")
-
-  if params.security_enabled:
-    Execute(format("kinit -kt {hdfs_user_keytab} {hdfs_user}"),
-            user = params.hdfs_user)
-  ExecuteHadoop(mkdir_cmd,
-                try_sleep=try_sleep,
-                tries=tries,
-                not_if=format(
-                  "! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
-                  "{dir_exists} && ! {namenode_safe_mode_off}"),
-                only_if=format(
-                  "{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
-                  "! {dir_exists}"),
-                conf_dir=params.hadoop_conf_dir,
-                user=params.hdfs_user
-  )
-  Execute(record_dir_in_stub,
-          user=params.hdfs_user,
-          only_if=format("{dir_absent_in_stub}")
-  )
-
-  recursive = "-R" if recursive_chown else ""
-  perm_cmds = []
-
-  if owner:
-    chown = owner
-    if group:
-      chown = format("{owner}:{group}")
-    perm_cmds.append(format("fs -chown {recursive} {chown} {name}"))
-  if mode:
-    perm_cmds.append(format("fs -chmod {recursive} {mode} {name}"))
-  for cmd in perm_cmds:
-    ExecuteHadoop(cmd,
-                  user=params.hdfs_user,
-                  only_if=format("! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && {namenode_safe_mode_off} && {dir_exists}"),
-                  try_sleep=try_sleep,
-                  tries=tries,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-
-

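hdfs_directory() above keys its idempotency off a local stub file: each
directory name is appended to the stub once created, and the not_if/only_if
guards consult it on later runs. A minimal sketch of that pattern, with
hypothetical names:

    def ensure_hdfs_dir(name, stub_path, create):
        # Skip work if a previous run already recorded this directory.
        try:
            with open(stub_path) as f:
                if name in f.read().splitlines():
                    return
        except IOError:
            pass  # no stub yet: first run on this host
        create(name)                 # e.g. issue "hadoop fs -mkdir -p <name>"
        with open(stub_path, "a") as f:
            f.write(name + "\n")     # record it for future runs
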
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/zkfc_slave.py
deleted file mode 100644
index f415f24..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/zkfc_slave.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class ZkfcSlave(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    service(
-      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    service(
-      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def configure(self, env):
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.zkfc_pid_file)
-
-
-if __name__ == "__main__":
-  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.8/hooks/dummy-script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/hooks/dummy-script.py b/ambari-server/src/test/resources/stacks/HDP/2.0.8/hooks/dummy-script.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.8/hooks/dummy-script.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.8/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.8/metainfo.xml
new file mode 100644
index 0000000..91f8d81
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.8/metainfo.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+      <active>true</active>
+    </versions>
+    <extends>2.0.7</extends>
+</metainfo>
+

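This <extends>2.0.7</extends> element is the stack-extension mechanism
AMBARI-4358 adds: 2.0.8 inherits the parent stack's service definitions and
overrides only what it redefines. A hedged sketch of the resolution rule, as a
simplified dict merge rather than Ambari's actual stack resolver:

    def resolve_services(stacks, version):
        # Child definitions shadow the parent's; everything else is inherited.
        services = dict(stacks[version].get("services", {}))
        parent = stacks[version].get("extends")
        if parent:
            inherited = resolve_services(stacks, parent)
            inherited.update(services)
            return inherited
        return services

    stacks = {
        "2.0.7": {"services": {"HDFS": "2.0.7 definition", "HBASE": "2.0.7 definition"}},
        "2.0.8": {"extends": "2.0.7", "services": {"HDFS": "2.0.8 override"}},
    }
    print(resolve_services(stacks, "2.0.8"))
    # {'HDFS': '2.0.8 override', 'HBASE': '2.0.7 definition'}
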
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.8/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/repos/repoinfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.8/repos/repoinfo.xml
new file mode 100644
index 0000000..1025240
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.8/repos/repoinfo.xml
@@ -0,0 +1,61 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os type="centos6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.0.8</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os type="centos5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.0.8</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os type="redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.0.8</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os type="redhat5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.0.8</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os type="suse11">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.0.8</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os type="sles11">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.0.8</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.8/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/role_command_order.json b/ambari-server/src/test/resources/stacks/HDP/2.0.8/role_command_order.json
new file mode 100644
index 0000000..84610ca
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.8/role_command_order.json
@@ -0,0 +1,100 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
+        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
+    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
+    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+    "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "WEBHCAT_SERVER-START": ["TASKTRACKER-START", "HIVE_SERVER-START"],
+    "HIVE_METASTORE-START": ["MYSQL_SERVER-START"],
+    "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
+    "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
+    "FLUME_SERVER-START": ["OOZIE_SERVER-START"],
+    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
+        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
+        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
+        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
+        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
+        "WEBHCAT_SERVER-START", "FLUME_SERVER-START"],
+    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
+    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
+    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
+    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
+    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+    "TASKTRACKER-UPGRADE": ["JOBTRACKER-UPGRADE"],
+    "MAPREDUCE_CLIENT-UPGRADE": ["TASKTRACKER-UPGRADE", "JOBTRACKER-UPGRADE"],
+    "ZOOKEEPER_SERVER-UPGRADE": ["MAPREDUCE_CLIENT-UPGRADE"],
+    "ZOOKEEPER_CLIENT-UPGRADE": ["ZOOKEEPER_SERVER-UPGRADE"],
+    "HBASE_MASTER-UPGRADE": ["ZOOKEEPER_CLIENT-UPGRADE"],
+    "HBASE_REGIONSERVER-UPGRADE": ["HBASE_MASTER-UPGRADE"],
+    "HBASE_CLIENT-UPGRADE": ["HBASE_REGIONSERVER-UPGRADE"],
+    "HIVE_SERVER-UPGRADE" : ["HBASE_CLIENT-UPGRADE"],
+    "HIVE_METASTORE-UPGRADE" : ["HIVE_SERVER-UPGRADE"],
+    "MYSQL_SERVER-UPGRADE": ["HIVE_METASTORE-UPGRADE"],
+    "HIVE_CLIENT-UPGRADE": ["MYSQL_SERVER-UPGRADE"],
+    "HCAT-UPGRADE": ["HIVE_CLIENT-UPGRADE"],
+    "OOZIE_SERVER-UPGRADE" : ["HCAT-UPGRADE"],
+    "OOZIE_CLIENT-UPGRADE" : ["OOZIE_SERVER-UPGRADE"],
+    "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
+    "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
+    "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
+    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+    "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
+  },
+  "_comment" : "GLUSTERFS-specific dependencies",
+  "optional_glusterfs": {
+    "HBASE_MASTER-START": ["PEERSTATUS-START"],
+    "JOBTRACKER-START": ["PEERSTATUS-START"],
+    "TASKTRACKER-START": ["PEERSTATUS-START"],
+    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"],
+    "JOBTRACKER-UPGRADE": ["GLUSTERFS_CLIENT-UPGRADE"]
+  },
+  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+  "optional_no_glusterfs": {
+    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
+    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
+    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
+    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
+    "JOBTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
+    "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HIVE_SERVER-START": ["DATANODE-START"],
+    "WEBHCAT_SERVER-START": ["DATANODE-START"],
+    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
+        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
+    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
+        "SECONDARY_NAMENODE-START"],
+    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
+        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
+    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "NAMENODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
+        "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+    "DATANODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
+        "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+    "SECONDARY_NAMENODE-UPGRADE": ["NAMENODE-UPGRADE"],
+    "DATANODE-UPGRADE": ["SECONDARY_NAMENODE-UPGRADE"],
+    "HDFS_CLIENT-UPGRADE": ["DATANODE-UPGRADE"],
+    "JOBTRACKER-UPGRADE": ["HDFS_CLIENT-UPGRADE"]
+  },
+  "_comment" : "Dependencies that are used in HA NameNode cluster",
+  "optional_ha": {
+    "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
+    "ZKFC-START": ["NAMENODE-START"],
+    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
+    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
+  }
+}
+

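Each key in this file is a blocked "ROLE-COMMAND" and its value lists the
commands that must complete first, so a depth-first walk over the map yields a
valid execution order. A minimal sketch under that reading (no cycle
handling):

    deps = {
        "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
        "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
    }

    def schedule(task, seen, out):
        # Visit blockers first, then the task itself (post-order).
        if task in seen:
            return
        seen.add(task)
        for blocker in deps.get(task, []):
            schedule(blocker, seen, out)
        out.append(task)

    order = []
    schedule("HBASE_REGIONSERVER-START", set(), order)
    print(order)
    # ['ZOOKEEPER_SERVER-START', 'HBASE_MASTER-START', 'HBASE_REGIONSERVER-START']
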
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..336701a
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
@@ -0,0 +1,81 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <!--
+  We can use this to distinguish old vs. new.
+  -->
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <comment>Non-relational distributed database and centralized service for configuration management &amp;
+        synchronization
+      </comment>
+      <version>0.96.0.2.0.6.0</version>
+      <components>
+        <component>
+          <name>HBASE_MASTER</name>
+          <category>MASTER</category>
+          <customCommands>
+            <customCommand> <!--Specialized only for custom commands, can use the same script as well -->
+              <name>RESTART</name>
+              <commandScript>
+                <script>scripts/hbase_master_restart.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>888</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>HBASE_REGIONSERVER</name>
+          <category>SLAVE</category>
+          <commandScript> <!--This is the script to handle all default commands -->
+            <script>scripts/hbase_master.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HBASE_CLIENT</name>
+          <category>CLIENT</category>
+        </component>
+      </components>
+
+      <customCommands>
+        <customCommand>
+          <name>SERVICE_VALIDATION</name>
+          <commandScript>
+            <script>scripts/hbase_validation.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>300</timeout>
+          </commandScript>
+        </customCommand>
+      </customCommands>
+      <configuration-dependencies>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..4c17978
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
@@ -0,0 +1,147 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>2.1.0.2.0.6.0</version>
+
+      <components>
+        <component>
+          <name>NAMENODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode_dec_overr.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>YET_ANOTHER_CHILD_COMMAND</name>
+              <commandScript>
+                <script>scripts/yet_another_child_command.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>DATANODE</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>SECONDARY_NAMENODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HDFS_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hdfs_client_overridden.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>JOURNALNODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/journalnode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>ZKFC</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/zkfc_slave.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>child-package-def</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check_2.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <customCommands>
+        <customCommand>
+          <name>RESTART</name>
+          <commandScript>
+            <script>scripts/restart_child.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </customCommand>
+        <customCommand>
+          <name>YET_ANOTHER_CHILD_SRV_COMMAND</name>
+          <commandScript>
+            <script>scripts/yet_another_child_srv_command.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </customCommand>
+      </customCommands>
+
+      <configuration-dependencies>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>


[4/4] git commit: AMBARI-4358. Add stack extension support for pluggable services (dlysnichenko)

Posted by dm...@apache.org.
AMBARI-4358. Add stack extension support for pluggable services (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/37f11ebd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/37f11ebd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/37f11ebd

Branch: refs/heads/trunk
Commit: 37f11ebda45eeac550ecacb78a25b40cd563825f
Parents: f2146a4
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Wed Jan 22 18:09:38 2014 +0200
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Fri Jan 31 20:46:45 2014 +0200

----------------------------------------------------------------------
 .../ambari_agent/CustomServiceOrchestrator.py   |    9 +-
 .../src/main/python/ambari_agent/FileCache.py   |   28 +-
 .../TestCustomServiceOrchestrator.py            |    2 +-
 .../test/python/ambari_agent/TestFileCache.py   |   31 +-
 .../ambari/server/agent/ExecutionCommand.java   |    3 +-
 .../ambari/server/agent/HeartbeatMonitor.java   |    7 +-
 .../server/api/services/AmbariMetaInfo.java     |   24 +-
 .../server/api/util/StackExtensionHelper.java   |  247 +-
 .../AmbariCustomCommandExecutionHelper.java     |   25 +-
 .../server/state/CommandScriptDefinition.java   |   29 +
 .../ambari/server/state/ComponentInfo.java      |   25 +
 .../server/state/CustomCommandDefinition.java   |   27 +
 .../apache/ambari/server/state/ServiceInfo.java |   25 +-
 .../apache/ambari/server/state/StackInfo.java   |   23 +-
 .../server/api/services/AmbariMetaInfoTest.java |  191 +
 .../api/util/StackExtensionHelperTest.java      |    4 +-
 .../AmbariManagementControllerTest.java         |    2 +-
 .../python/stacks/1.3.3/configs/default.json    |    2 +-
 .../python/stacks/1.3.3/configs/secured.json    |    2 +-
 .../python/stacks/2.1.1/configs/default.json    |    2 +-
 .../python/stacks/2.1.1/configs/secured.json    |    2 +-
 .../stacks/HDP/2.0.6/hooks/dummy-script.py      |   21 +
 .../resources/stacks/HDP/2.0.7/metainfo.xml     |    2 +-
 .../services/HBASE/package/dummy-script.py      |   21 +
 .../HDP/2.0.7/services/HBASE/scripts/hbase.py   |   19 -
 .../services/HBASE/scripts/hbase_client.py      |   19 -
 .../services/HBASE/scripts/hbase_master.py      |   19 -
 .../HBASE/scripts/hbase_regionserver.py         |   19 -
 .../stacks/HDP/2.0.7/services/HDFS/metainfo.xml |   61 +-
 .../2.0.7/services/HDFS/package/dummy-script.py |   21 +
 .../HDFS/package/files/checkForFormat.sh        |   62 -
 .../services/HDFS/package/files/checkWebUI.py   |   53 -
 .../services/HDFS/package/scripts/datanode.py   |   57 -
 .../HDFS/package/scripts/hdfs_client.py         |   49 -
 .../HDFS/package/scripts/hdfs_datanode.py       |   56 -
 .../HDFS/package/scripts/hdfs_namenode.py       |  180 -
 .../HDFS/package/scripts/hdfs_snamenode.py      |   53 -
 .../HDFS/package/scripts/journalnode.py         |   74 -
 .../services/HDFS/package/scripts/namenode.py   |   61 -
 .../services/HDFS/package/scripts/params.py     |  180 -
 .../HDFS/package/scripts/service_check.py       |  107 -
 .../services/HDFS/package/scripts/snamenode.py  |   64 -
 .../HDFS/package/scripts/status_params.py       |   31 -
 .../services/HDFS/package/scripts/utils.py      |  138 -
 .../services/HDFS/package/scripts/zkfc_slave.py |   62 -
 .../stacks/HDP/2.0.8/hooks/dummy-script.py      |   21 +
 .../resources/stacks/HDP/2.0.8/metainfo.xml     |   24 +
 .../stacks/HDP/2.0.8/repos/repoinfo.xml         |   61 +
 .../stacks/HDP/2.0.8/role_command_order.json    |  100 +
 .../HDP/2.0.8/services/HBASE/metainfo.xml       |   75 +
 .../stacks/HDP/2.0.8/services/HDFS/metainfo.xml |  147 +
 .../stacks/HDP/2.0.8/services/HDFS/metrics.json | 7800 ++++++++++++++++++
 .../2.0.8/services/HDFS/package/dummy-script.py |   21 +
 53 files changed, 8964 insertions(+), 1424 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
index 7ffc1c9..95ad2cd 100644
--- a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
+++ b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
@@ -98,12 +98,9 @@ class CustomServiceOrchestrator():
       else:
         if command_name == self.CUSTOM_COMMAND_COMMAND:
           command_name = command['hostLevelParams']['custom_command']
-        stack_name = command['hostLevelParams']['stack_name']
-        stack_version = command['hostLevelParams']['stack_version']
-        hook_dir = self.file_cache.get_hook_base_dir(stack_name, stack_version)
-        metadata_folder = command['commandParams']['service_metadata_folder']
-        base_dir = self.file_cache.get_service_base_dir(
-          stack_name, stack_version, metadata_folder, component_name)
+        hook_dir = self.file_cache.get_hook_base_dir(command)
+        service_subpath = command['commandParams']['service_package_folder']
+        base_dir = self.file_cache.get_service_base_dir(service_subpath)
         script_path = self.resolve_script_path(base_dir, script, script_type)
         script_tuple = (script_path, base_dir)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-agent/src/main/python/ambari_agent/FileCache.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/FileCache.py b/ambari-agent/src/main/python/ambari_agent/FileCache.py
index eafb592..01d2e52 100644
--- a/ambari-agent/src/main/python/ambari_agent/FileCache.py
+++ b/ambari-agent/src/main/python/ambari_agent/FileCache.py
@@ -42,32 +42,30 @@ class FileCache():
     self.cache_dir = config.get('agent', 'cache_dir')
 
 
-  def get_service_base_dir(self, stack_name, stack_version, service, component):
+  def get_service_base_dir(self, service_subpath):
     """
     Returns a base directory for service
     """
-    metadata_path = os.path.join(self.cache_dir, "stacks", str(stack_name),
-                                 str(stack_version), "services", str(service),
-                                 "package")
-    if not os.path.isdir(metadata_path):
+    service_base_dir = os.path.join(self.cache_dir, "stacks", service_subpath)
+    if not os.path.isdir(service_base_dir):
       # TODO: Metadata downloading will be implemented at Phase 2
       # As of now, all stack definitions are packaged and distributed with
       # agent rpm
-      message = "Metadata dir for not found for a service " \
-                "(stackName = {0}, stackVersion = {1}, " \
-                "service = {2}, " \
-                "component = {3}".format(stack_name, stack_version,
-                                                 service, component)
+      message = "Service base dir not found at expected location {0}".\
+        format(service_base_dir)
       raise AgentException(message)
-    return metadata_path
+    return service_base_dir
 
 
-  def get_hook_base_dir(self, stack_name, stack_version):
+  def get_hook_base_dir(self, command):
     """
-    Returns a base directory for service
+    Returns a base directory for hooks
     """
-    hook_base_path = os.path.join(self.cache_dir, "stacks", str(stack_name),
-                                 str(stack_version), "hooks")
+    try:
+      hooks_subpath = command['commandParams']['hooks_folder']
+    except KeyError:
+      return None
+    hook_base_path = os.path.join(self.cache_dir, "stacks", hooks_subpath)
     if not os.path.isdir(hook_base_path):
       # TODO: Metadata downloading will be implemented at Phase 2
       # As of now, all stack definitions are packaged and distributed with

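For reference, the net effect of the FileCache rework above: the agent no longer reassembles stack paths from (stack_name, stack_version, service, component); the server ships ready-made subpaths and the agent only joins them under <cache_dir>/stacks. A minimal sketch of that contract, with an assumed cache_dir (this is not code from the patch):

    import os

    CACHE_DIR = "/var/lib/ambari-agent/cache"  # assumed agent cache_dir

    def resolve_cached_dir(subpath):
        # Server-provided subpaths are joined under <cache_dir>/stacks
        return os.path.join(CACHE_DIR, "stacks", subpath)

    command_params = {
        'service_package_folder': 'HDP/2.0.8/services/HDFS/package',
        'hooks_folder': 'HDP/2.0.8/hooks',
    }
    print(resolve_cached_dir(command_params['service_package_folder']))
    # -> /var/lib/ambari-agent/cache/stacks/HDP/2.0.8/services/HDFS/package
    print(resolve_cached_dir(command_params['hooks_folder']))
    # -> /var/lib/ambari-agent/cache/stacks/HDP/2.0.8/hooks
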
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
index dadc793..971048b 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
@@ -149,7 +149,7 @@ class TestCustomServiceOrchestrator(TestCase):
         'script_type': 'PYTHON',
         'script': 'scripts/hbase_regionserver.py',
         'command_timeout': '600',
-        'service_metadata_folder' : 'HBASE'
+        'service_package_folder' : 'HBASE'
       },
       'taskId' : '3',
       'roleCommand': 'INSTALL'

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-agent/src/test/python/ambari_agent/TestFileCache.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestFileCache.py b/ambari-agent/src/test/python/ambari_agent/TestFileCache.py
index ae84268..5e389d5 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestFileCache.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestFileCache.py
@@ -58,15 +58,14 @@ class TestFileCache(TestCase):
     fileCache = FileCache(self.config)
     # Check existing dir case
     isdir_mock.return_value = True
-    base = fileCache.get_service_base_dir("HDP", "2.0.7",
-                                          "HBASE", "REGION_SERVER")
-    self.assertEqual(base, "/var/lib/ambari-agent/cache/stacks/HDP/2.0.7/"
-                           "services/HBASE/package")
+    service_subpath = "HDP/2.1.1/services/ZOOKEEPER/package"
+    base = fileCache.get_service_base_dir(service_subpath)
+    self.assertEqual(base, "/var/lib/ambari-agent/cache/stacks/HDP/2.1.1/"
+                           "services/ZOOKEEPER/package")
     # Check absent dir case
     isdir_mock.return_value = False
     try:
-      fileCache.get_service_base_dir("HDP", "2.0.7",
-                                          "HBASE", "REGION_SERVER")
+      fileCache.get_service_base_dir(service_subpath)
       self.fail("Should throw an exception")
     except AgentException:
       pass # Expected
@@ -77,14 +76,28 @@ class TestFileCache(TestCase):
   @patch("os.path.isdir")
   def test_get_hook_base_dir(self, isdir_mock):
     fileCache = FileCache(self.config)
+    # Check missing parameter
+    command = {
+      'commandParams' : {
+      }
+    }
+    base = fileCache.get_hook_base_dir(command)
+    self.assertEqual(base, None)
+
     # Check existing dir case
     isdir_mock.return_value = True
-    base = fileCache.get_hook_base_dir("HDP", "2.0.7")
-    self.assertEqual(base, "/var/lib/ambari-agent/cache/stacks/HDP/2.0.7/hooks")
+    command = {
+      'commandParams' : {
+        'hooks_folder' : 'HDP/2.1.1/hooks'
+      }
+    }
+    base = fileCache.get_hook_base_dir(command)
+    self.assertEqual(base, "/var/lib/ambari-agent/cache/stacks/HDP/2.1.1/hooks")
+
     # Check absent dir case
     isdir_mock.return_value = False
     try:
-      fileCache.get_hook_base_dir("HDP", "2.0.7")
+      fileCache.get_hook_base_dir(command)
       self.fail("Should throw an exception")
     except AgentException:
       pass # Expected

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index c430c13..3ce7da2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -239,7 +239,8 @@ public class ExecutionCommand extends AgentCommand {
     String COMMAND_TIMEOUT = "command_timeout";
     String SCRIPT = "script";
     String SCRIPT_TYPE = "script_type";
-    String SERVICE_METADATA_FOLDER = "service_metadata_folder";
+    String SERVICE_PACKAGE_FOLDER = "service_package_folder";
+    String HOOKS_FOLDER = "hooks_folder";
     String STACK_NAME = "stack_name";
     String STACK_VERSION = "stack_version";
     String SERVICE_REPO_INFO = "service_repo_info";

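With the renamed SERVICE_PACKAGE_FOLDER key and the new HOOKS_FOLDER key, a command's commandParams now carries both subpaths alongside the script details; roughly like this (values are illustrative, borrowed from the test fixtures below):

    command_params = {
        'command_timeout': '600',
        'script': 'scripts/namenode.py',
        'script_type': 'PYTHON',
        'service_package_folder': 'HDP/2.0.8/services/HDFS/package',
        'hooks_folder': 'HDP/2.0.8/hooks',
    }
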
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
index 6616b4f..2babd6b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
@@ -198,6 +198,8 @@ public class HeartbeatMonitor implements Runnable {
     ComponentInfo componentInfo = ambariMetaInfo.getComponent(
             stackId.getStackName(), stackId.getStackVersion(),
             serviceName, componentName);
+    StackInfo stackInfo = ambariMetaInfo.getStackInfo(stackId.getStackName(),
+            stackId.getStackVersion());
 
     Map<String, Map<String, String>> configurations = new TreeMap<String, Map<String, String>>();
 
@@ -260,8 +262,9 @@ public class HeartbeatMonitor implements Runnable {
       }
     }
     commandParams.put(COMMAND_TIMEOUT, commandTimeout);
-    commandParams.put(SERVICE_METADATA_FOLDER,
-       serviceInfo.getServiceMetadataFolder());
+    commandParams.put(SERVICE_PACKAGE_FOLDER,
+       serviceInfo.getServicePackageFolder());
+    commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
     // Fill host level params
     Map<String, String> hostLevelParams = statusCmd.getHostLevelParams();
     hostLevelParams.put(STACK_NAME, stackId.getStackName());

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index 78131f2..8e1e787 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -93,7 +93,7 @@ public class AmbariMetaInfo {
     @Override
     public boolean accept(File dir, String s) {
       if (s.equals(".svn") || s.equals(".git") ||
-          s.equals(HOOKS_DIR)) // Hooks dir is not a service
+          s.equals(StackExtensionHelper.HOOKS_FOLDER_NAME)) // Hooks dir is not a service
       {
         return false;
       }
@@ -108,7 +108,6 @@ public class AmbariMetaInfo {
   private static final List<String> ALL_SUPPORTED_OS = Arrays.asList(
       "centos5", "redhat5", "centos6", "redhat6", "oraclelinux5",
       "oraclelinux6", "suse11", "sles11", "ubuntu12");
-  private final static String HOOKS_DIR = "hooks";
   private final ActionDefinitionManager adManager = new ActionDefinitionManager();
   private String serverVersion = "undefined";
   private List<StackInfo> stacksResult = new ArrayList<StackInfo>();
@@ -657,15 +656,16 @@ public class AmbariMetaInfo {
   }
 
   private void getConfigurationInformation(File stackRoot) throws Exception {
+    String stackRootAbsPath = stackRoot.getAbsolutePath();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Loading stack information"
-        + ", stackRoot = " + stackRoot.getAbsolutePath());
+        + ", stackRoot = " + stackRootAbsPath);
     }
 
     if (!stackRoot.isDirectory() && !stackRoot.exists())
       throw new IOException("" + Configuration.METADETA_DIR_PATH
         + " should be a directory with stack"
-        + ", stackRoot = " + stackRoot.getAbsolutePath());
+        + ", stackRoot = " + stackRootAbsPath);
 
     StackExtensionHelper stackExtensionHelper = new StackExtensionHelper
       (stackRoot);
@@ -674,7 +674,7 @@ public class AmbariMetaInfo {
     List<StackInfo> stacks = stackExtensionHelper.getAllAvailableStacks();
     if (stacks.isEmpty()) {
       throw new AmbariException("Unable to find stack definitions under " +
-        "stackRoot = " + stackRoot.getAbsolutePath());
+        "stackRoot = " + stackRootAbsPath);
     }
 
     for (StackInfo stack : stacks) {
@@ -684,9 +684,11 @@ public class AmbariMetaInfo {
 
       stacksResult.add(stack);
 
+      String stackPath = stackRootAbsPath + File.separator +
+              stack.getName() + File.separator + stack.getVersion();
+
       // get repository data for current stack of techs
-      File repositoryFolder = new File(stackRoot.getAbsolutePath()
-        + File.separator + stack.getName() + File.separator + stack.getVersion()
+      File repositoryFolder = new File(stackPath
         + File.separator + REPOSITORY_FOLDER_NAME + File.separator
         + REPOSITORY_FILE_NAME);
 
@@ -707,13 +709,19 @@ public class AmbariMetaInfo {
           + ", repoFolder=" + repositoryFolder.getPath());
       }
 
+      // Populate services
       List<ServiceInfo> services = stackExtensionHelper
         .getAllApplicableServices(stack);
-
       stack.setServices(services);
+
+      // Resolve hooks folder
+      String stackHooksToUse = stackExtensionHelper.
+              resolveHooksFolder(stack);
+      stack.setStackHooksFolder(stackHooksToUse);
     }
   }
 
+
   public String getServerVersion() {
     return serverVersion;
   }

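The new resolveHooksFolder step above fills the stack's hooks folder after services are populated. A hedged sketch of the lookup order it implements (the stack's own definition first, then parents in inheritance order); the HDP 2.0.6/2.0.7/2.0.8 values match the test fixtures, everything else is illustrative:

    def resolve_hooks_folder(stack, parents):
        # First hooks folder found wins: the stack itself, then its
        # parents in inheritance order; None if nobody defines one.
        for candidate in [stack] + list(parents):
            if candidate.get('hooks_folder') is not None:
                return candidate['hooks_folder']
        return None

    hdp_206 = {'hooks_folder': 'HDP/2.0.6/hooks'}
    hdp_207 = {'hooks_folder': None}               # defines no hooks dir
    hdp_208 = {'hooks_folder': 'HDP/2.0.8/hooks'}  # overrides the parent's

    print(resolve_hooks_folder(hdp_207, [hdp_206]))           # HDP/2.0.6/hooks
    print(resolve_hooks_folder(hdp_208, [hdp_207, hdp_206]))  # HDP/2.0.8/hooks
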
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
index 0883ad4..13e1dd2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
@@ -50,6 +50,14 @@ import org.xml.sax.SAXException;
 /**
  * Helper methods for providing stack extension behavior -
  * Apache Jira: AMBARI-2819
+ *
+ * Stack extension processing is done in two steps. In the first step, we parse
+ * all information for every stack from the stack files. In the second step, we
+ * go through the parents and perform inheritance where needed. In both steps,
+ * stacks are processed in random order, which is why the extension implementation
+ * for any new stack/service/component property should also consist of two
+ * separate steps (otherwise a child may happen to be processed before its
+ * parent's properties are populated).
  */
 public class StackExtensionHelper {
   private File stackRoot;
@@ -58,6 +66,8 @@ public class StackExtensionHelper {
   private final Map<String, StackInfo> stackVersionMap = new HashMap<String,
     StackInfo>();
   private Map<String, List<StackInfo>> stackParentsMap = null;
+  public final static String HOOKS_FOLDER_NAME = "hooks";
+  private static final String PACKAGE_FOLDER_NAME = "package";
 
   private static final Map<Class<?>, JAXBContext> _jaxbContexts =
       new HashMap<Class<?>, JAXBContext> ();
@@ -111,39 +121,50 @@ public class StackExtensionHelper {
 
   private ServiceInfo mergeServices(ServiceInfo parentService,
                                     ServiceInfo childService) {
-    // TODO: Allow extending stack with custom services
     ServiceInfo mergedServiceInfo = new ServiceInfo();
     mergedServiceInfo.setName(childService.getName());
     mergedServiceInfo.setComment(childService.getComment());
     mergedServiceInfo.setUser(childService.getUser());
     mergedServiceInfo.setVersion(childService.getVersion());
     mergedServiceInfo.setConfigDependencies(childService.getConfigDependencies());
+
+    Map<String, ServiceOsSpecific> osSpecific = childService.getOsSpecifics();
+    if (! osSpecific.isEmpty()) {
+      mergedServiceInfo.setOsSpecifics(childService.getOsSpecifics());
+    } else {
+      mergedServiceInfo.setOsSpecifics(parentService.getOsSpecifics());
+    }
+
+    CommandScriptDefinition commandScript = childService.getCommandScript();
+    if (commandScript != null) {
+       mergedServiceInfo.setCommandScript(childService.getCommandScript());
+    } else {
+      mergedServiceInfo.setCommandScript(parentService.getCommandScript());
+    }
+
+    String servicePackageFolder = childService.getServicePackageFolder();
+    if (servicePackageFolder != null) {
+      mergedServiceInfo.setServicePackageFolder(servicePackageFolder);
+    } else {
+      mergedServiceInfo.setServicePackageFolder(
+              parentService.getServicePackageFolder());
+    }
+
+    // Merge custom command definitions for service
+    List<CustomCommandDefinition> mergedCustomCommands =
+            mergeCustomCommandLists(parentService.getCustomCommands(),
+                    childService.getCustomCommands());
+    mergedServiceInfo.setCustomCommands(mergedCustomCommands);
     
     // metrics
     if (null == childService.getMetricsFile() && null != parentService.getMetricsFile())
       mergedServiceInfo.setMetricsFile(parentService.getMetricsFile());
-    
-    // Add all child components to service
+
+    populateComponents(mergedServiceInfo, parentService, childService);
+
+    // Add child properties not deleted
     List<String> deleteList = new ArrayList<String>();
     List<String> appendList = new ArrayList<String>();
-    for (ComponentInfo childComponentInfo : childService.getComponents()) {
-      if (!childComponentInfo.isDeleted()) {
-        mergedServiceInfo.getComponents().add(childComponentInfo);
-        appendList.add(childComponentInfo.getName());
-      } else {
-        deleteList.add(childComponentInfo.getName());
-      }
-    }
-    // Add remaining parent components
-    for (ComponentInfo parentComponent : parentService.getComponents()) {
-      if (!deleteList.contains(parentComponent.getName()) && !appendList
-          .contains(parentComponent.getName())) {
-        mergedServiceInfo.getComponents().add(parentComponent);
-      }
-    }
-    // Add child properties not deleted
-    deleteList = new ArrayList<String>();
-    appendList = new ArrayList<String>();
     for (PropertyInfo propertyInfo : childService.getProperties()) {
       if (!propertyInfo.isDeleted()) {
         mergedServiceInfo.getProperties().add(propertyInfo);
@@ -170,7 +191,92 @@ public class StackExtensionHelper {
     }
     return mergedServiceInfo;
   }
-  
+
+
+  /**
+   * Merges the component sets of parentService and childService and writes
+   * the result to mergedServiceInfo.
+   */
+  private void populateComponents(ServiceInfo mergedServiceInfo, ServiceInfo parentService,
+                                  ServiceInfo childService) {
+    // Add all child components to service
+    List<String> deleteList = new ArrayList<String>();
+    List<String> appendList = new ArrayList<String>();
+
+    for (ComponentInfo childComponent : childService.getComponents()) {
+      if (!childComponent.isDeleted()) {
+        ComponentInfo parentComponent = getComponent(parentService,
+                childComponent.getName());
+        if (parentComponent != null) { // If the parent defines a same-named component
+          ComponentInfo mergedComponent = mergeComponents(parentComponent,
+                  childComponent);
+          mergedServiceInfo.getComponents().add(mergedComponent);
+          appendList.add(mergedComponent.getName());
+        } else {
+          mergedServiceInfo.getComponents().add(childComponent);
+          appendList.add(childComponent.getName());
+        }
+      } else {
+        deleteList.add(childComponent.getName());
+      }
+    }
+    // Add remaining parent components
+    for (ComponentInfo parentComponent : parentService.getComponents()) {
+      if (!deleteList.contains(parentComponent.getName()) && !appendList
+              .contains(parentComponent.getName())) {
+        mergedServiceInfo.getComponents().add(parentComponent);
+      }
+    }
+  }
+
+
+  private ComponentInfo getComponent(ServiceInfo service, String componentName) {
+    for (ComponentInfo component : service.getComponents()) {
+      if (component.getName().equals(componentName)) {
+        return component;
+      }
+    }
+    return null;
+  }
+
+
+  private ComponentInfo mergeComponents(ComponentInfo parent, ComponentInfo child) {
+    ComponentInfo result = new ComponentInfo(child); // cloning child
+    CommandScriptDefinition commandScript = child.getCommandScript();
+    if (commandScript != null) {
+      result.setCommandScript(child.getCommandScript());
+    } else {
+      result.setCommandScript(parent.getCommandScript());
+    }
+
+    // Merge custom command definitions for component
+    List<CustomCommandDefinition> mergedCustomCommands =
+            mergeCustomCommandLists(parent.getCustomCommands(),
+                    child.getCustomCommands());
+    result.setCustomCommands(mergedCustomCommands);
+
+    return result;
+  }
+
+
+  private List<CustomCommandDefinition> mergeCustomCommandLists(
+          List<CustomCommandDefinition> parentList,
+          List<CustomCommandDefinition> childList) {
+    List<CustomCommandDefinition> mergedList =
+            new ArrayList<CustomCommandDefinition>(childList);
+    List<String> existingNames = new ArrayList<String>();
+    for (CustomCommandDefinition childCCD : childList) {
+      existingNames.add(childCCD.getName());
+    }
+    for (CustomCommandDefinition parentsCCD : parentList) {
+      if (! existingNames.contains(parentsCCD.getName())) {
+        mergedList.add(parentsCCD);
+        existingNames.add(parentsCCD.getName());
+      }
+    }
+    return mergedList;
+  }
+
 
   public List<ServiceInfo> getAllApplicableServices(StackInfo stackInfo) {
     LinkedList<StackInfo> parents = (LinkedList<StackInfo>)
@@ -208,6 +314,37 @@ public class StackExtensionHelper {
     return new ArrayList<ServiceInfo>(serviceInfoMap.values());
   }
 
+
+  /**
+   * Determines the exact hooks folder (subpath from stackRoot to the hooks
+   * directory) to use for a given stack. If the given stack has no hooks
+   * folder, the inheritance hierarchy is queried.
+   * @param stackInfo stack to work with
+   */
+  public String resolveHooksFolder(StackInfo stackInfo) throws AmbariException {
+    // Determine hooks folder for stack
+    String stackId = String.format("%s-%s",
+            stackInfo.getName(), stackInfo.getVersion());
+    String hooksFolder = stackInfo.getStackHooksFolder();
+    if (hooksFolder == null) {
+      // Try to get parent's
+      List<StackInfo> parents = getParents(stackInfo);
+      for (StackInfo parent : parents) {
+        hooksFolder = parent.getStackHooksFolder();
+        if (hooksFolder != null) {
+          break;
+        }
+      }
+    }
+    if (hooksFolder == null) {
+      String message = String.format(
+              "Can not determine hooks dir for stack %s",
+              stackId);
+      LOG.debug(message);
+    }
+    return hooksFolder;
+  }
+
   void populateServicesForStack(StackInfo stackInfo) throws
           ParserConfigurationException, SAXException,
           XPathExpressionException, IOException, JAXBException {
@@ -218,7 +355,6 @@ public class StackExtensionHelper {
     if (!servicesFolder.exists()) {
       LOG.info("No services defined for stack: " + stackInfo.getName() +
       "-" + stackInfo.getVersion());
-
     } else {
       try {
         File[] servicesFolders = servicesFolder.listFiles(AmbariMetaInfo
@@ -267,9 +403,14 @@ public class StackExtensionHelper {
             List<ServiceInfo> serviceInfos = smiv2x.getServices();
             for (ServiceInfo serviceInfo : serviceInfos) {
               serviceInfo.setSchemaVersion(AmbariMetaInfo.SCHEMA_VERSION_2);
-              serviceInfo.setServiceMetadataFolder(serviceFolder.getName());
-              // TODO: allow repository overriding when extending stack
 
+              // Find service package folder
+              String servicePackageDir = resolveServicePackageFolder(
+                      stackRoot.getAbsolutePath(), stackInfo,
+                      serviceFolder.getName(), serviceInfo.getName());
+              serviceInfo.setServicePackageFolder(servicePackageDir);
+
+              // process metrics.json
               if (metricsJson.exists())
                 serviceInfo.setMetricsFile(metricsJson);
 
@@ -290,10 +431,49 @@ public class StackExtensionHelper {
   }
 
 
+  /**
+   * Determines the exact service directory that contains scripts and templates
+   * for a service. If the given stack does not have this folder, the
+   * inheritance hierarchy is queried.
+   */
+  String resolveServicePackageFolder(String stackRoot,
+                                     StackInfo stackInfo, String serviceFolderName,
+                                     String serviceName) throws AmbariException {
+    String stackId = String.format("%s-%s",
+            stackInfo.getName(), stackInfo.getVersion());
+    String expectedSubPath = stackInfo.getName() + File.separator +
+                    stackInfo.getVersion() + File.separator +
+                    AmbariMetaInfo.SERVICES_FOLDER_NAME +
+                    File.separator + serviceFolderName + File.separator +
+                    PACKAGE_FOLDER_NAME;
+    File packageDir = new File(stackRoot + File.separator + expectedSubPath);
+    String servicePackageFolder = null;
+    if (packageDir.isDirectory()) {
+      servicePackageFolder = expectedSubPath;
+      String message = String.format(
+              "Service package folder for service %s" +
+                      "for stack %s has been resolved to %s",
+              serviceName, stackId, servicePackageFolder);
+      LOG.debug(message);
+    } else {
+        String message = String.format(
+                "Service package folder %s for service %s " +
+                        "for stack %s does not exist.",
+                packageDir.getAbsolutePath(), serviceName, stackId);
+        LOG.debug(message);
+    }
+    return servicePackageFolder;
+  }
+
+
   public List<StackInfo> getAllAvailableStacks() {
     return new ArrayList<StackInfo>(stackVersionMap.values());
   }
 
+  public List<StackInfo> getParents(StackInfo stackInfo) {
+    return stackParentsMap.get(stackInfo.getVersion());
+  }
+
   private Map<String, List<StackInfo>> getParentStacksInOrder(
       Collection<StackInfo> stacks) {
     Map<String, List<StackInfo>> parentStacksMap = new HashMap<String,
@@ -372,7 +552,22 @@ public class StackExtensionHelper {
       stackInfo.setMinUpgradeVersion(smx.getVersion().getUpgrade());
       stackInfo.setActive(smx.getVersion().isActive());
       stackInfo.setParentStackVersion(smx.getExtends());
-      String rcoFileLocation = stackVersionFolder.getAbsolutePath() + File.separator + AmbariMetaInfo.RCO_FILE_NAME;
+
+      // Populating hooks dir for stack
+      String hooksSubPath = stackInfo.getName() + File.separator +
+              stackInfo.getVersion() + File.separator + HOOKS_FOLDER_NAME;
+      String hooksAbsPath = stackVersionFolder.getAbsolutePath() +
+              File.separator + HOOKS_FOLDER_NAME;
+      if (new File(hooksAbsPath).exists()) {
+        stackInfo.setStackHooksFolder(hooksSubPath);
+      } else {
+        String message = String.format("Hooks folder %s does not exist",
+                hooksAbsPath);
+        LOG.debug(message);
+      }
+
+      String rcoFileLocation = stackVersionFolder.getAbsolutePath() +
+              File.separator + AmbariMetaInfo.RCO_FILE_NAME;
       if (new File(rcoFileLocation).exists())
         stackInfo.setRcoFileLocation(rcoFileLocation);
     }

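The merge rules added here all follow one pattern: scalar fields (commandScript, servicePackageFolder, osSpecifics) are taken from the child when defined and inherited from the parent otherwise, while custom command lists are merged by name with the child winning on conflicts. A hedged sketch of that precedence, with plain dicts standing in for CustomCommandDefinition:

    def merge_scalar(parent_value, child_value):
        # Child overrides the parent whenever it defines a value at all
        return child_value if child_value is not None else parent_value

    def merge_custom_commands(parent_cmds, child_cmds):
        # Child commands win by name; parent commands fill the gaps
        merged = list(child_cmds)
        names = {c['name'] for c in child_cmds}
        for cmd in parent_cmds:
            if cmd['name'] not in names:
                merged.append(cmd)
                names.add(cmd['name'])
        return merged

    parent = [{'name': 'RESTART', 'script': 'scripts/restart_parent.py'}]
    child = [{'name': 'RESTART', 'script': 'scripts/restart_child.py'},
             {'name': 'YET_ANOTHER_CHILD_SRV_COMMAND',
              'script': 'scripts/yet_another_child_srv_command.py'}]
    print(merge_custom_commands(parent, child))
    # -> child's RESTART wins; only commands absent from the child come from the parent
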
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 72fc31d..55022cf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -45,6 +45,7 @@ import org.apache.ambari.server.state.ServiceComponentHostEvent;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.ServiceOsSpecific;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgressEvent;
 import org.apache.ambari.server.utils.StageUtils;
@@ -64,6 +65,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_T
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CUSTOM_COMMAND;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_DRIVER_FILENAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_HOME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JCE_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
@@ -75,7 +77,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.REPO_INFO
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCHEMA_VERSION;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TYPE;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_METADATA_FOLDER;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_REPO_INFO;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
@@ -221,6 +223,8 @@ public class AmbariCustomCommandExecutionHelper {
     ServiceInfo serviceInfo =
         ambariMetaInfo.getServiceInfo(stackId.getStackName(),
             stackId.getStackVersion(), serviceName);
+    StackInfo stackInfo = ambariMetaInfo.getStackInfo(stackId.getStackName(),
+            stackId.getStackVersion());
 
     long nowTimestamp = System.currentTimeMillis();
 
@@ -281,8 +285,9 @@ public class AmbariCustomCommandExecutionHelper {
       }
       commandParams.put(COMMAND_TIMEOUT, commandTimeout);
 
-      commandParams.put(SERVICE_METADATA_FOLDER,
-          serviceInfo.getServiceMetadataFolder());
+      commandParams.put(SERVICE_PACKAGE_FOLDER,
+          serviceInfo.getServicePackageFolder());
+      commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
 
       execCmd.setCommandParams(commandParams);
     }
@@ -360,6 +365,8 @@ public class AmbariCustomCommandExecutionHelper {
     ServiceInfo serviceInfo =
         ambariMetaInfo.getServiceInfo(stackId.getStackName(),
             stackId.getStackVersion(), serviceName);
+    StackInfo stackInfo = ambariMetaInfo.getStackInfo(stackId.getStackName(),
+            stackId.getStackVersion());
 
 
     stage.addHostRoleExecutionCommand(hostname,
@@ -414,8 +421,9 @@ public class AmbariCustomCommandExecutionHelper {
     }
     commandParams.put(COMMAND_TIMEOUT, commandTimeout);
 
-    commandParams.put(SERVICE_METADATA_FOLDER,
-        serviceInfo.getServiceMetadataFolder());
+    commandParams.put(SERVICE_PACKAGE_FOLDER,
+        serviceInfo.getServicePackageFolder());
+    commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
 
     execCmd.setCommandParams(commandParams);
 
@@ -570,6 +578,8 @@ public class AmbariCustomCommandExecutionHelper {
     ComponentInfo componentInfo = ambariMetaInfo.getComponent(
         stackId.getStackName(), stackId.getStackVersion(),
         serviceName, componentName);
+    StackInfo stackInfo = ambariMetaInfo.getStackInfo(stackId.getStackName(),
+            stackId.getStackVersion());
 
     ExecutionCommand execCmd = stage.getExecutionCommandWrapper(scHost.getHostName(),
         scHost.getServiceComponentName()).getExecutionCommand();
@@ -612,8 +622,9 @@ public class AmbariCustomCommandExecutionHelper {
       }
     }
     commandParams.put(COMMAND_TIMEOUT, commandTimeout);
-    commandParams.put(SERVICE_METADATA_FOLDER,
-        serviceInfo.getServiceMetadataFolder());
+    commandParams.put(SERVICE_PACKAGE_FOLDER,
+        serviceInfo.getServicePackageFolder());
+    commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
 
     execCmd.setCommandParams(commandParams);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java
index 3394ecd..56e7438 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java
@@ -19,6 +19,9 @@
 package org.apache.ambari.server.state;
 
 
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 
@@ -60,4 +63,30 @@ public class CommandScriptDefinition {
     PUPPET // TODO: Not supported yet. Do we really need it?
   }
 
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) {
+      return false;
+    }
+    if (obj == this) {
+      return true;
+    }
+    if (! (obj instanceof CommandScriptDefinition)) {
+      return false;
+    }
+
+    CommandScriptDefinition rhs = (CommandScriptDefinition) obj;
+    return new EqualsBuilder().
+            append(script, rhs.script).
+            append(scriptType, rhs.scriptType).
+            append(timeout, rhs.timeout).isEquals();
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(17, 31).
+            append(script).
+            append(scriptType).
+            append(timeout).toHashCode();
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
index 8798ef1..f03bd8c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
@@ -63,6 +63,23 @@ public class ComponentInfo {
   @XmlElement(name="auto-deploy")
   private AutoDeployInfo m_autoDeploy;
 
+  public ComponentInfo() {
+  }
+
+  /**
+   * Copy constructor.
+   */
+  public ComponentInfo(ComponentInfo prototype) {
+    name = prototype.name;
+    category = prototype.category;
+    deleted = prototype.deleted;
+    cardinality = prototype.cardinality;
+    commandScript = prototype.commandScript;
+    customCommands = prototype.customCommands;
+    dependencies = prototype.dependencies;
+    m_autoDeploy = prototype.m_autoDeploy;
+  }
+
   public String getName() {
     return name;
   }
@@ -103,6 +120,10 @@ public class ComponentInfo {
     return commandScript;
   }
 
+  public void setCommandScript(CommandScriptDefinition commandScript) {
+    this.commandScript = commandScript;
+  }
+
   public List<CustomCommandDefinition> getCustomCommands() {
     if (customCommands == null) {
       customCommands = new ArrayList<CustomCommandDefinition>();
@@ -110,6 +131,10 @@ public class ComponentInfo {
     return customCommands;
   }
 
+  public void setCustomCommands(List<CustomCommandDefinition> customCommands) {
+    this.customCommands = customCommands;
+  }
+
   public boolean isCustomCommand(String commandName) {
     if (customCommands != null && commandName != null) {
       for (CustomCommandDefinition cc: customCommands) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/main/java/org/apache/ambari/server/state/CustomCommandDefinition.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/CustomCommandDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/CustomCommandDefinition.java
index ab4143f..a26e7be 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/CustomCommandDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/CustomCommandDefinition.java
@@ -17,6 +17,9 @@
  */
 package org.apache.ambari.server.state;
 
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+
 import javax.xml.bind.annotation.*;
 
 /**
@@ -36,4 +39,28 @@ public class CustomCommandDefinition {
     return commandScript;
   }
 
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) {
+      return false;
+    }
+    if (obj == this) {
+      return true;
+    }
+    if (! (obj instanceof CustomCommandDefinition)) {
+      return false;
+    }
+
+    CustomCommandDefinition rhs = (CustomCommandDefinition) obj;
+    return new EqualsBuilder().
+            append(name, rhs.name).
+            append(commandScript, rhs.commandScript).isEquals();
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(17, 31).
+            append(name).
+            append(commandScript).toHashCode();
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
index c45531f..c12363c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
@@ -95,12 +95,13 @@ public class ServiceInfo {
 
 
   /**
-   * Directory, that contains service metadata. Since schema ver 2,
+   * Stores the subpath from the stack root to the exact directory that contains
+   * service scripts and templates. Since schema ver 2,
    * we may have multiple service metadata inside folder.
    * Added at schema ver 2
    */
   @XmlTransient
-  private String serviceMetadataFolder;
+  private String servicePackageFolder;
 
   public boolean isDeleted() {
     return isDeleted;
@@ -286,12 +287,12 @@ public class ServiceInfo {
   }
 
 
-  public String getServiceMetadataFolder() {
-    return serviceMetadataFolder;
+  public String getServicePackageFolder() {
+    return servicePackageFolder;
   }
 
-  public void setServiceMetadataFolder(String serviceMetadataFolder) {
-    this.serviceMetadataFolder = serviceMetadataFolder;
+  public void setServicePackageFolder(String servicePackageFolder) {
+    this.servicePackageFolder = servicePackageFolder;
   }
 
   /**
@@ -316,6 +317,10 @@ public class ServiceInfo {
     return serviceOsSpecificsMap;
   }
 
+  public void setOsSpecifics(Map<String, ServiceOsSpecific> serviceOsSpecificsMap) {
+    this.serviceOsSpecificsMap = serviceOsSpecificsMap;
+  }
+
   public List<CustomCommandDefinition> getCustomCommands() {
     if (customCommands == null) {
       customCommands = new ArrayList<CustomCommandDefinition>();
@@ -323,10 +328,18 @@ public class ServiceInfo {
     return customCommands;
   }
 
+  public void setCustomCommands(List<CustomCommandDefinition> customCommands) {
+    this.customCommands = customCommands;
+  }
+
   public CommandScriptDefinition getCommandScript() {
     return commandScript;
   }
 
+  public void setCommandScript(CommandScriptDefinition commandScript) {
+    this.commandScript = commandScript;
+  }
+
   /**
    * @param file the file containing the metrics definitions
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index 6affad9..cc1a45e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -23,7 +23,7 @@ import java.util.List;
 
 import org.apache.ambari.server.controller.StackVersionResponse;
 
-public class StackInfo {
+public class StackInfo implements Comparable<StackInfo> {
   private String name;
   private String version;
   private String minUpgradeVersion;
@@ -33,6 +33,12 @@ public class StackInfo {
   private List<ServiceInfo> services;
   private String parentStackVersion;
 
+  /**
+   * Stores the subpath from the stack root to the exact hooks folder for the
+   * stack. These hooks are applied to all commands for services in the current stack.
+   */
+  private String stackHooksFolder;
+
   public String getName() {
     return name;
   }
@@ -145,4 +151,19 @@ public class StackInfo {
   public void setRcoFileLocation(String rcoFileLocation) {
     this.rcoFileLocation = rcoFileLocation;
   }
+
+  public String getStackHooksFolder() {
+    return stackHooksFolder;
+  }
+
+  public void setStackHooksFolder(String stackHooksFolder) {
+    this.stackHooksFolder = stackHooksFolder;
+  }
+
+  @Override
+  public int compareTo(StackInfo o) {
+    String myId = name + "-" + version;
+    String oId = o.name + "-" + o.version;
+    return myId.compareTo(oId);
+  }
 }

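One note on the new Comparable implementation: the ordering is plain string comparison on the "name-version" id, i.e. lexicographic rather than numeric on version segments, so for example:

    stacks = ["HDP-2.0.6", "HDP-2.0.8", "HDP-2.0.10"]
    print(sorted(stacks))
    # -> ['HDP-2.0.10', 'HDP-2.0.6', 'HDP-2.0.8']  (2.0.10 sorts before 2.0.8)
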
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 3a348e7..2a4310b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -27,6 +27,7 @@ import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.lang.reflect.Method;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.LinkedList;
@@ -1058,4 +1059,194 @@ public class AmbariMetaInfoTest {
       }
     }
   }
+
+
+  @Test
+  public void testHooksDirInheritance() throws Exception {
+    // Test hook dir determination in parent
+    StackInfo stackInfo = metaInfo.getStackInfo(STACK_NAME_HDP, "2.0.6");
+    Assert.assertEquals("HDP/2.0.6/hooks", stackInfo.getStackHooksFolder());
+    // Test hook dir inheritance
+    stackInfo = metaInfo.getStackInfo(STACK_NAME_HDP, "2.0.7");
+    Assert.assertEquals("HDP/2.0.6/hooks", stackInfo.getStackHooksFolder());
+    // Test hook dir override
+    stackInfo = metaInfo.getStackInfo(STACK_NAME_HDP, "2.0.8");
+    Assert.assertEquals("HDP/2.0.8/hooks", stackInfo.getStackHooksFolder());
+  }
+
+
+  @Test
+  public void testServicePackageDirInheritance() throws Exception {
+    // Test service package dir determination in parent
+    ServiceInfo service = metaInfo.getService(STACK_NAME_HDP, "2.0.7", "HBASE");
+    Assert.assertEquals("HDP/2.0.7/services/HBASE/package",
+            service.getServicePackageFolder());
+
+    service = metaInfo.getService(STACK_NAME_HDP, "2.0.7", "HDFS");
+    Assert.assertEquals("HDP/2.0.7/services/HDFS/package",
+            service.getServicePackageFolder());
+    // Test service package dir inheritance
+    service = metaInfo.getService(STACK_NAME_HDP, "2.0.8", "HBASE");
+    Assert.assertEquals("HDP/2.0.7/services/HBASE/package",
+            service.getServicePackageFolder());
+    // Test service package dir override
+    service = metaInfo.getService(STACK_NAME_HDP, "2.0.8", "HDFS");
+    Assert.assertEquals("HDP/2.0.8/services/HDFS/package",
+            service.getServicePackageFolder());
+  }
+
+
+  @Test
+  public void testServiceCommandScriptInheritance() throws Exception {
+    // Test command script determination in parent
+    ServiceInfo service = metaInfo.getService(STACK_NAME_HDP, "2.0.7", "HDFS");
+    Assert.assertEquals("scripts/service_check_1.py",
+            service.getCommandScript().getScript());
+    service = metaInfo.getService(STACK_NAME_HDP, "2.0.7", "HBASE");
+    Assert.assertEquals("scripts/service_check.py",
+            service.getCommandScript().getScript());
+    // Test command script inheritance
+    service = metaInfo.getService(STACK_NAME_HDP, "2.0.8", "HBASE");
+    Assert.assertEquals("scripts/service_check.py",
+            service.getCommandScript().getScript());
+    // Test command script override
+    service = metaInfo.getService(STACK_NAME_HDP, "2.0.8", "HDFS");
+    Assert.assertEquals("scripts/service_check_2.py",
+            service.getCommandScript().getScript());
+  }
+
+  @Test
+  public void testComponentCommandScriptInheritance() throws Exception {
+    // Test command script determination in parent
+    ComponentInfo component = metaInfo.getComponent(STACK_NAME_HDP,
+            "2.0.7", "HDFS", "HDFS_CLIENT");
+    Assert.assertEquals("scripts/hdfs_client.py",
+            component.getCommandScript().getScript());
+    component = metaInfo.getComponent(STACK_NAME_HDP,
+            "2.0.7", "HBASE", "HBASE_MASTER");
+    Assert.assertEquals("scripts/hbase_master.py",
+            component.getCommandScript().getScript());
+    // Test command script inheritance
+    component = metaInfo.getComponent(STACK_NAME_HDP,
+            "2.0.8", "HBASE", "HBASE_MASTER");
+    Assert.assertEquals("scripts/hbase_master.py",
+            component.getCommandScript().getScript());
+    // Test command script override
+    component = metaInfo.getComponent(STACK_NAME_HDP,
+            "2.0.8", "HDFS", "HDFS_CLIENT");
+    Assert.assertEquals("scripts/hdfs_client_overridden.py",
+            component.getCommandScript().getScript());
+  }
+
+
+  @Test
+  public void testServiceCustomCommandScriptInheritance() throws Exception {
+    // Test custom command script determination in parent
+    ServiceInfo service = metaInfo.getService(STACK_NAME_HDP, "2.0.7", "HDFS");
+
+    CustomCommandDefinition ccd = findCustomCommand("RESTART", service);
+    Assert.assertEquals("scripts/restart_parent.py",
+            ccd.getCommandScript().getScript());
+
+    ccd = findCustomCommand("YET_ANOTHER_PARENT_SRV_COMMAND", service);
+    Assert.assertEquals("scripts/yet_another_parent_srv_command.py",
+            ccd.getCommandScript().getScript());
+
+    Assert.assertEquals(2, service.getCustomCommands().size());
+
+    // Test custom command script inheritance
+    service = metaInfo.getService(STACK_NAME_HDP, "2.0.8", "HDFS");
+    Assert.assertEquals(3, service.getCustomCommands().size());
+
+    ccd = findCustomCommand("YET_ANOTHER_PARENT_SRV_COMMAND", service);
+    Assert.assertEquals("scripts/yet_another_parent_srv_command.py",
+            ccd.getCommandScript().getScript());
+
+    // Test custom command script override
+    service = metaInfo.getService(STACK_NAME_HDP, "2.0.8", "HDFS");
+
+    ccd = findCustomCommand("RESTART", service);
+    Assert.assertEquals("scripts/restart_child.py",
+            ccd.getCommandScript().getScript());
+
+    ccd = findCustomCommand("YET_ANOTHER_CHILD_SRV_COMMAND", service);
+    Assert.assertEquals("scripts/yet_another_child_srv_command.py",
+            ccd.getCommandScript().getScript());
+  }
+
+
+  @Test
+  public void testChildCustomCommandScriptInheritance() throws Exception {
+    // Test custom command script determination in parent
+    ComponentInfo component = metaInfo.getComponent(STACK_NAME_HDP, "2.0.7",
+            "HDFS", "NAMENODE");
+
+    CustomCommandDefinition ccd = findCustomCommand("DECOMMISSION", component);
+    Assert.assertEquals("scripts/namenode_dec.py",
+            ccd.getCommandScript().getScript());
+
+    ccd = findCustomCommand("YET_ANOTHER_PARENT_COMMAND", component);
+    Assert.assertEquals("scripts/yet_another_parent_command.py",
+            ccd.getCommandScript().getScript());
+
+    Assert.assertEquals(2, component.getCustomCommands().size());
+
+    // Test custom command script inheritance
+    component = metaInfo.getComponent(STACK_NAME_HDP, "2.0.8",
+            "HDFS", "NAMENODE");
+    Assert.assertEquals(3, component.getCustomCommands().size());
+
+    ccd = findCustomCommand("YET_ANOTHER_PARENT_COMMAND", component);
+    Assert.assertEquals("scripts/yet_another_parent_command.py",
+            ccd.getCommandScript().getScript());
+
+    // Test custom command script override
+    ccd = findCustomCommand("DECOMMISSION", component);
+    Assert.assertEquals("scripts/namenode_dec_overr.py",
+            ccd.getCommandScript().getScript());
+
+    ccd = findCustomCommand("YET_ANOTHER_CHILD_COMMAND", component);
+    Assert.assertEquals("scripts/yet_another_child_command.py",
+            ccd.getCommandScript().getScript());
+  }
+
+
+  @Test
+  public void testServiceOsSpecificsInheritance() throws Exception {
+    // Test OS specifics determination in parent
+    ServiceInfo service = metaInfo.getService(STACK_NAME_HDP, "2.0.7", "HDFS");
+    Assert.assertEquals("parent-package-def",
+            service.getOsSpecifics().get("any").getPackages().get(0).getName());
+    service = metaInfo.getService(STACK_NAME_HDP, "2.0.7", "HBASE");
+    Assert.assertEquals(2, service.getOsSpecifics().keySet().size());
+    // Test OS specifics inheritance
+    service = metaInfo.getService(STACK_NAME_HDP, "2.0.8", "HBASE");
+    Assert.assertEquals(2, service.getOsSpecifics().keySet().size());
+    // Test OS specifics override
+    service = metaInfo.getService(STACK_NAME_HDP, "2.0.8", "HDFS");
+    Assert.assertEquals("child-package-def",
+            service.getOsSpecifics().get("any").getPackages().get(0).getName());
+  }
+
+
+  private CustomCommandDefinition findCustomCommand(String ccName,
+                                                    ServiceInfo service) {
+    for (CustomCommandDefinition ccd: service.getCustomCommands()) {
+      if (ccd.getName().equals(ccName)) {
+        return ccd;
+      }
+    }
+    return null;
+  }
+
+  private CustomCommandDefinition findCustomCommand(String ccName,
+                                                    ComponentInfo component) {
+    for (CustomCommandDefinition ccd: component.getCustomCommands()) {
+      if (ccd.getName().equals(ccName)) {
+        return ccd;
+      }
+    }
+    return null;
+  }
+
 }

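(Reviewer note: the inheritance tests above pin down the lookup rule this commit introduces: a child stack such as HDP 2.0.8, which extends 2.0.7, reuses its parent's hooks folder and per-service package folder unless it ships its own. A minimal sketch of that child-first fallback follows, assuming an illustrative Stack class and on-disk layout; this is not the actual StackExtensionHelper code.)

import os

class Stack(object):
    """Illustrative stand-in for a stack definition on disk."""
    def __init__(self, name, version, root, parent=None):
        self.name = name          # e.g. "HDP"
        self.version = version    # e.g. "2.0.8"
        self.root = root          # e.g. ".../stacks/HDP/2.0.8"
        self.parent = parent      # parent Stack or None

    def hooks_folder(self):
        # Use our own hooks dir if present, otherwise ask the parent.
        if os.path.isdir(os.path.join(self.root, "hooks")):
            return "%s/%s/hooks" % (self.name, self.version)
        return self.parent.hooks_folder() if self.parent else None

    def service_package_folder(self, service):
        # Same child-first rule for a service's package directory.
        if os.path.isdir(os.path.join(self.root, "services", service, "package")):
            return "%s/%s/services/%s/package" % (self.name, self.version, service)
        return self.parent.service_package_folder(service) if self.parent else None

(With the test resources in this commit, HDP 2.0.7 ships no hooks dir of its own, so hooks_folder() falls back to HDP/2.0.6/hooks, while HDP 2.0.8 ships one and overrides; the package-dir rule behaves the same way per service.)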
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
index 6edb3df..2e064e8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
@@ -22,7 +22,6 @@ import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.state.*;
 
 import java.io.File;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
@@ -85,7 +84,8 @@ public class StackExtensionHelperTest {
         assertEquals("hive-site", configDependencies.get(1));
       } else if (serviceInfo.getName().equals("HBASE")) {
         assertEquals("HBASE", serviceInfo.getName());
-        assertEquals("HBASE", serviceInfo.getServiceMetadataFolder());
+        assertEquals("HDP/2.0.7/services/HBASE/package",
+                serviceInfo.getServicePackageFolder());
         assertEquals("2.0", serviceInfo.getSchemaVersion());
         assertTrue(serviceInfo.getComment().startsWith("Non-relational distr"));
         assertEquals("0.96.0.2.0.6.0", serviceInfo.getVersion());

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 251aa5f..009e0af 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -146,7 +146,7 @@ public class AmbariManagementControllerTest {
   private static final String REPO_ID = "HDP-1.1.1.16";
   private static final String PROPERTY_NAME = "hbase.regionserver.msginterval";
   private static final String SERVICE_NAME = "HDFS";
-  private static final int STACK_VERSIONS_CNT = 10;
+  private static final int STACK_VERSIONS_CNT = 11;
   private static final int REPOS_CNT = 3;
   private static final int STACKS_CNT = 1;
   private static final int STACK_PROPERTIES_CNT = 81;

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/python/stacks/1.3.3/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/configs/default.json b/ambari-server/src/test/python/stacks/1.3.3/configs/default.json
index 70b93f5..6d12470 100644
--- a/ambari-server/src/test/python/stacks/1.3.3/configs/default.json
+++ b/ambari-server/src/test/python/stacks/1.3.3/configs/default.json
@@ -25,7 +25,7 @@
     "role": "DATANODE", 
     "commandParams": {
         "command_timeout": "600", 
-        "service_metadata_folder": "HDFS", 
+        "service_package_folder": "HDFS",
         "script_type": "PYTHON", 
         "schema_version": "2.0", 
         "script": "scripts/datanode.py",

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/python/stacks/1.3.3/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/configs/secured.json b/ambari-server/src/test/python/stacks/1.3.3/configs/secured.json
index ac357cc..9520b02 100644
--- a/ambari-server/src/test/python/stacks/1.3.3/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/1.3.3/configs/secured.json
@@ -25,7 +25,7 @@
     "role": "MYSQL_SERVER", 
     "commandParams": {
         "command_timeout": "600", 
-        "service_metadata_folder": "HIVE", 
+        "service_package_folder": "HIVE",
         "script_type": "PYTHON", 
         "schema_version": "2.0", 
         "script": "scripts/mysql_server.py",

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/python/stacks/2.1.1/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1.1/configs/default.json b/ambari-server/src/test/python/stacks/2.1.1/configs/default.json
index 5b40256..f1ce054 100644
--- a/ambari-server/src/test/python/stacks/2.1.1/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.1.1/configs/default.json
@@ -21,7 +21,7 @@
     "role": "OOZIE_SERVICE_CHECK", 
     "commandParams": {
         "command_timeout": "300", 
-        "service_metadata_folder": "OOZIE", 
+        "service_package_folder": "OOZIE",
         "script_type": "PYTHON", 
         "schema_version": "2.0", 
         "script": "scripts/service_check.py",

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/python/stacks/2.1.1/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1.1/configs/secured.json b/ambari-server/src/test/python/stacks/2.1.1/configs/secured.json
index b000c56..8477faa 100644
--- a/ambari-server/src/test/python/stacks/2.1.1/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.1.1/configs/secured.json
@@ -26,7 +26,7 @@
     "role": "YARN_CLIENT", 
     "commandParams": {
         "command_timeout": "600", 
-        "service_metadata_folder": "YARN", 
+        "service_package_folder": "YARN",
         "script_type": "PYTHON", 
         "schema_version": "2.0", 
         "script": "scripts/yarn_client.py",

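(Reviewer note: the four config fixtures above only track the key rename: what the server used to send as service_metadata_folder in commandParams now arrives as service_package_folder, pointing the agent at the possibly inherited package directory. A hedged sketch of how an agent-side consumer might read it; the real agent code is not part of this diff, and the cache path below is an assumption.)

import json
import os

def service_package_dir(command_json_path,
                        cache_root="/var/lib/ambari-agent/cache/stacks"):
    with open(command_json_path) as f:
        command = json.load(f)
    # "service_package_folder" replaces the old "service_metadata_folder"
    folder = command["commandParams"]["service_package_folder"]
    return os.path.join(cache_root, folder)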
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.6/hooks/dummy-script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.6/hooks/dummy-script.py b/ambari-server/src/test/resources/stacks/HDP/2.0.6/hooks/dummy-script.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.6/hooks/dummy-script.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.7/metainfo.xml
index 07da411..10673b7 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/metainfo.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/metainfo.xml
@@ -19,6 +19,6 @@
     <versions>
       <active>true</active>
     </versions>
-    <extends>2.0.5</extends>
+    <extends>2.0.6</extends>
 </metainfo>
 

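(Reviewer note: the one-line change above is what rewires the test hierarchy: HDP 2.0.7 now extends 2.0.6 instead of 2.0.5, so the inheritance tests earlier in this commit resolve through 2.0.6. A sketch of walking that <extends> chain from metainfo.xml files, assuming the stacks/<name>/<version>/ layout used by these test resources:)

import os
import xml.etree.ElementTree as ET

def parent_version(stacks_root, name, version):
    meta = os.path.join(stacks_root, name, version, "metainfo.xml")
    node = ET.parse(meta).find("extends")
    return node.text.strip() if node is not None and node.text else None

def stack_chain(stacks_root, name, version):
    # e.g. ("HDP", "2.0.8") -> ["2.0.8", "2.0.7", "2.0.6", ...]
    chain = [version]
    parent = parent_version(stacks_root, name, version)
    while parent:
        chain.append(parent)
        parent = parent_version(stacks_root, name, parent)
    return chain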
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/package/dummy-script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/package/dummy-script.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/package/dummy-script.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/package/dummy-script.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase.py
deleted file mode 100644
index 0a169a4..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase_client.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase_client.py
deleted file mode 100644
index 8a4d0e0..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase_client.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase_master.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase_master.py
deleted file mode 100644
index 0a169a4..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase_master.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase_regionserver.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase_regionserver.py
deleted file mode 100644
index 0a169a4..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HBASE/scripts/hbase_regionserver.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/metainfo.xml
index 3de6ce5..4ab509a 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/metainfo.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/metainfo.xml
@@ -36,7 +36,15 @@
             <customCommand>
               <name>DECOMMISSION</name>
               <commandScript>
-                <script>scripts/namenode.py</script>
+                <script>scripts/namenode_dec.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>YET_ANOTHER_PARENT_COMMAND</name>
+              <commandScript>
+                <script>scripts/yet_another_parent_command.py</script>
                 <scriptType>PYTHON</scriptType>
                 <timeout>600</timeout>
               </commandScript>
@@ -101,46 +109,37 @@
           <packages>
             <package>
               <type>rpm</type>
-              <name>lzo</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-libhdfs</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-lzo</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-lzo-native</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>snappy</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>snappy-devel</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ambari-log4j</name>
+              <name>parent-package-def</name>
             </package>
           </packages>
         </osSpecific>
       </osSpecifics>
 
       <commandScript>
-        <script>scripts/service_check.py</script>
+        <script>scripts/service_check_1.py</script>
         <scriptType>PYTHON</scriptType>
         <timeout>300</timeout>
       </commandScript>
 
+      <customCommands>
+        <customCommand>
+          <name>RESTART</name>
+          <commandScript>
+            <script>scripts/restart_parent.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </customCommand>
+        <customCommand>
+          <name>YET_ANOTHER_PARENT_SRV_COMMAND</name>
+          <commandScript>
+            <script>scripts/yet_another_parent_srv_command.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </customCommand>
+      </customCommands>
+
       <configuration-dependencies>
         <config-type>core-site</config-type>
         <config-type>global</config-type>

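(Reviewer note: the parent-side metainfo changes above -- a renamed DECOMMISSION script, an extra component command, and a new service-level <customCommands> block -- are what the inheritance tests assert against: a child stack keeps parent-only commands, adds its own, and overrides matches by name, which is why 2.0.8's HDFS ends up with three service commands. A minimal sketch of that by-name merge, with illustrative dict shapes rather than the real CustomCommandDefinition objects:)

def merge_custom_commands(parent_cmds, child_cmds):
    # Child definitions win on name collisions; parent-only ones survive.
    merged = dict((c["name"], c) for c in parent_cmds)
    merged.update(dict((c["name"], c) for c in child_cmds))
    return list(merged.values())

parent = [{"name": "RESTART", "script": "scripts/restart_parent.py"},
          {"name": "YET_ANOTHER_PARENT_SRV_COMMAND",
           "script": "scripts/yet_another_parent_srv_command.py"}]
child = [{"name": "RESTART", "script": "scripts/restart_child.py"},
         {"name": "YET_ANOTHER_CHILD_SRV_COMMAND",
          "script": "scripts/yet_another_child_srv_command.py"}]

merged = merge_custom_commands(parent, child)
assert len(merged) == 3
assert [c for c in merged if c["name"] == "RESTART"][0]["script"] == "scripts/restart_child.py"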
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/dummy-script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/dummy-script.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/dummy-script.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/dummy-script.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkWebUI.py
deleted file mode 100644
index f8e9c1a..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkWebUI.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import httplib
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
-  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
-
-  (options, args) = parser.parse_args()
-  
-  hosts = options.hosts.split(',')
-  port = options.port
-
-  for host in hosts:
-    try:
-      conn = httplib.HTTPConnection(host, port)
-      # This can be modified to get a partial url part to be sent with request
-      conn.request("GET", "/")
-      httpCode = conn.getresponse().status
-      conn.close()
-    except Exception:
-      httpCode = 404
-
-    if httpCode != 200:
-      print "Cannot access WEB UI on: http://" + host + ":" + port
-      exit(1)
-      
-
-if __name__ == "__main__":
-  main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/datanode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/datanode.py
deleted file mode 100644
index eaa27cf..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/datanode.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_datanode import datanode
-
-
-class DataNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    datanode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    datanode(action="stop")
-
-  def config(self, env):
-    import params
-
-    datanode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.datanode_pid_file)
-
-
-if __name__ == "__main__":
-  DataNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_client.py
deleted file mode 100644
index 6babde5..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_client.py
+++ /dev/null
@@ -1,49 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class HdfsClient(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.config(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-  def config(self, env):
-    import params
-
-    pass
-
-
-if __name__ == "__main__":
-  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_datanode.py
deleted file mode 100644
index e0b6c39..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_datanode.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-def datanode(action=None):
-  import params
-
-  if action == "configure":
-    Directory(params.dfs_domain_socket_dir,
-              recursive=True,
-              mode=0750,
-              owner=params.hdfs_user,
-              group=params.user_group)
-    Directory(params.dfs_data_dir,
-              recursive=True,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group)
-
-  if action == "start":
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_datanode_keytab_file,
-      principal=params.dfs_datanode_kerberos_principal
-    )
-  if action == "stop":
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_datanode_keytab_file,
-      principal=params.dfs_datanode_kerberos_principal
-    )


[2/4] AMBARI-4358. Add stack extension support for pluggable services (dlysnichenko)

Posted by dm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/37f11ebd/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HDFS/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HDFS/metrics.json b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HDFS/metrics.json
new file mode 100644
index 0000000..f33a0c0
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HDFS/metrics.json
@@ -0,0 +1,7800 @@
+{
+  "NAMENODE": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "dfs.FSNamesystem.TotalLoad",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "dfs.FSNamesystem.BlockCapacity",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "dfs.namenode.GetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesAppended": {
+            "metric": "dfs.namenode.FilesAppended",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "dfs.FSNamesystem.CapacityTotalGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "dfs.FSNamesystem.CapacityUsedGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "dfs.namenode.AddBlockOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesDeleted": {
+            "metric": "dfs.namenode.FilesDeleted",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "dfs.namenode.SyncsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "dfs.namenode.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesCreated": {
+            "metric": "dfs.namenode.FilesCreated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesRenamed": {
+            "metric": "dfs.namenode.FilesRenamed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
+            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetBlockLocations": {
+            "metric": "dfs.namenode.GetBlockLocations",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FileInfoOps": {
+            "metric": "dfs.namenode.FileInfoOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/DeleteFileOps": {
+            "metric": "dfs.namenode.DeleteFileOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.rpc.RpcAuthenticationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesInGetListingOps": {
+            "metric": "dfs.namenode.FilesInGetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.rpc.RpcAuthorizationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_num_ops": {
+            "metric": "dfs.namenode.SyncsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/FilesTotal": {
+            "metric": "dfs.FSNamesystem.FilesTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ExcessBlocks": {
+            "metric": "dfs.FSNamesystem.ExcessBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_num_ops": {
+            "metric": "dfs.namenode.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/SafemodeTime": {
+            "metric": "dfs.namenode.SafemodeTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlocksTotal": {
+            "metric": "dfs.FSNamesystem.BlocksTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_num_ops": {
+            "metric": "dfs.namenode.TransactionsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_avg_time": {
+            "metric": "dfs.namenode.TransactionsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/MissingBlocks": {
+            "metric": "dfs.FSNamesystem.MissingBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CorruptBlocks": {
+            "metric": "dfs.FSNamesystem.CorruptBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/fsImageLoadTime": {
+            "metric": "dfs.namenode.FsImageLoadTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/CreateFileOps": {
+            "metric": "dfs.namenode.CreateFileOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/dfs/namenode/Used": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.TotalLoad",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memMaxM":{
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlockCapacity",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/TotalFiles": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/HostName": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.GetListingOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/UpgradeFinalized": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/Safemode": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CorruptBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/LiveNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemaining": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentRemaining": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityTotalGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityUsedGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/DecomNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/NonDfsUsedSpace": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/UpgradeFinalized": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getEditLogSize_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReceived_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Safemode": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/FilesCreated": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.FilesCreated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/addBlock_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/DecomNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsed": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/NonHeapMemoryUsed": {
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/DeadNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/PercentUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Free": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Total": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/GetBlockLocations": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/fsync_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/HeapMemoryMax": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/create_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingReplicationBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/UnderReplicatedBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/FileInfoOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/MissingBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReport_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CapacityRemaining": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingDeletionBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getEditLogSize_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/FilesInGetListingOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/BlocksTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/complete_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/LiveNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollFsImage_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Syncs_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/StartTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReceived_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollEditLog_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/DeadNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/HeapMemoryUsed": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/FilesTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.FilesTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Version": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/ExcessBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.ExcessBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/PercentRemaining": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/blockReport_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/NonHeapMemoryMax": {
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollFsImage_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.ScheduledReplicationBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/BlocksTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getBlockLocations_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Transactions_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/create_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CapacityTotal": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityRemainingGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Transactions_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/MissingBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Threads": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CorruptBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReport_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/TotalFiles": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/NameDirStatuses": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getListing_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollEditLog_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/addBlock_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CapacityUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/CreateFileOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logError": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/Version": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getListing_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/NonDfsUsedSpace": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/renewLease_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/TotalBlocks": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityNonDFSUsed",
+            "pointInTime": true,
+            "temporal": false
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "dfs.FSNamesystem.TotalLoad",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "dfs.FSNamesystem.BlockCapacity",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "dfs.namenode.GetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesAppended": {
+            "metric": "dfs.namenode.FilesAppended",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "dfs.FSNamesystem.CapacityTotalGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "dfs.FSNamesystem.CapacityUsedGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "dfs.namenode.AddBlockOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesDeleted": {
+            "metric": "dfs.namenode.FilesDeleted",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "dfs.namenode.SyncsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "dfs.namenode.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesCreated": {
+            "metric": "dfs.namenode.FilesCreated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesRenamed": {
+            "metric": "dfs.namenode.FilesRenamed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
+            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetBlockLocations": {
+            "metric": "dfs.namenode.GetBlockLocations",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FileInfoOps": {
+            "metric": "dfs.namenode.FileInfoOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/DeleteFileOps": {
+            "metric": "dfs.namenode.DeleteFileOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.rpc.RpcAuthenticationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesInGetListingOps": {
+            "metric": "dfs.namenode.FilesInGetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.rpc.RpcAuthorizationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_num_ops": {
+            "metric": "dfs.namenode.SyncsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/FilesTotal": {
+            "metric": "dfs.FSNamesystem.FilesTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ExcessBlocks": {
+            "metric": "dfs.FSNamesystem.ExcessBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_num_ops": {
+            "metric": "dfs.namenode.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/SafemodeTime": {
+            "metric": "dfs.namenode.SafemodeTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlocksTotal": {
+            "metric": "dfs.FSNamesystem.BlocksTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_num_ops": {
+            "metric": "dfs.namenode.TransactionsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_avg_time": {
+            "metric": "dfs.namenode.TransactionsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/MissingBlocks": {
+            "metric": "dfs.FSNamesystem.MissingBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CorruptBlocks": {
+            "metric": "dfs.FSNamesystem.CorruptBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/fsImageLoadTime": {
+            "metric": "dfs.namenode.FsImageLoadTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/CreateFileOps": {
+            "metric": "dfs.namenode.CreateFileOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/dfs/namenode/Used": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memMaxM":{
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/TotalFiles": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.GetListingOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/HostName": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.tag.Hostname",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/runtime/StartTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/UpgradeFinalized": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.VersionRequestNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.FsyncAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemaining": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetFileInfoAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentRemaining": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.CompleteAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetBlockLocationsNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.AddBlockOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.SyncsAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTimeNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.BlockReportAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/HeapMemoryMax": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/HeapMemoryUsed": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "m

<TRUNCATED>