Posted to commits@ambari.apache.org by dm...@apache.org on 2017/01/20 10:19:41 UTC

[39/46] ambari git commit: AMBARI-18739. Perf: Create Rolling and Express Upgrade Packs (dlysnichenko)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_checkpoint_time.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_checkpoint_time.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_checkpoint_time.py
new file mode 100644
index 0000000..73bc168
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_checkpoint_time.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

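All five alert scripts in this patch share the same contract: the alert framework calls get_tokens() to learn which {{site/property}} placeholders the script needs, resolves each one against the live cluster configuration, and then calls execute() with a dictionary keyed by the full token string. A minimal sketch of that flow follows; the run_alert() dispatcher and the sample values are hypothetical stand-ins, not agent code:

    import importlib

    def run_alert(module_name, cluster_configs):
        alert = importlib.import_module(module_name)
        configurations = {}
        for token in alert.get_tokens():
            # A token such as "{{hdfs-alert-config/alert.behavior.type}}"
            # names a config site and a property within that site.
            site, prop = token.strip("{}").split("/", 1)
            if prop in cluster_configs.get(site, {}):
                configurations[token] = cluster_configs[site][prop]
        # Per the docstring, execute() returns (result_code, result_label).
        return alert.execute(configurations=configurations)

    sample_configs = {"hdfs-alert-config": {
        "alert.behavior.type": "percentage",
        "alert.success.percentage": "100",
        "alert.timeout.return.value": "false",
        "alert.timeout.secs": "10",
        "alert.flip.interval.mins": "30",
    }}
    print(run_alert("alert_checkpoint_time", sample_configs))

Note that importing these scripts outside an agent requires the resource_management libraries on PYTHONPATH, since each one pulls in simulate_perf_cluster_alert_behaviour at import time.
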
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_datanode_unmounted_data_dir.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_datanode_unmounted_data_dir.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_datanode_unmounted_data_dir.py
new file mode 100644
index 0000000..73bc168
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_datanode_unmounted_data_dir.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_nfs_gateway_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_nfs_gateway_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_nfs_gateway_process.py
new file mode 100644
index 0000000..73bc168
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_nfs_gateway_process.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_snamenode_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_snamenode_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_snamenode_process.py
new file mode 100644
index 0000000..73bc168
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_snamenode_process.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_upgrade_finalized.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_upgrade_finalized.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_upgrade_finalized.py
new file mode 100644
index 0000000..73bc168
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/alerts/alert_upgrade_finalized.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/datanode.py
new file mode 100644
index 0000000..66b467e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/datanode.py
@@ -0,0 +1,57 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.logger import Logger
+
+
+class FAKEDataNode(Dummy):
+  """
+  Dummy script that simulates a slave component.
+  """
+
+  def __init__(self):
+    super(FAKEDataNode, self).__init__()
+    self.component_name = "FAKEDATANODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.datanode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.datanode.keytab.file"
+
+  def get_component_name(self):
+    return "hadoop-hdfs-datanode"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing FAKEDataNode Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      stack_select.select(self.get_component_name(), params.version)
+
+if __name__ == "__main__":
+  FAKEDataNode().execute()

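pre_upgrade_restart() above only re-points the component's binaries when the target stack version advertises rolling-upgrade support. Below is a toy stand-in for that guard, with a made-up feature table in place of the stack's real stack_features.json that check_stack_feature() consults:

    # Toy stand-in for check_stack_feature(); versions and table are invented.
    FEATURES = {"rolling_upgrade": (1, 0)}  # feature -> minimum (major, minor)

    def supports(feature, version):
        minimum = FEATURES.get(feature)
        if minimum is None:
            return False
        return tuple(int(p) for p in version.split(".")[:2]) >= minimum

    # /commandParams/version is only set during the RESTART of a stack
    # upgrade, so the guard is a no-op for ordinary restarts.
    version = "1.0.0"
    if version and supports("rolling_upgrade", version):
        # Real script: stack_select.select("hadoop-hdfs-datanode", version)
        print("selecting hadoop-hdfs-datanode binaries for %s" % version)
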
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/hdfs_client.py
new file mode 100644
index 0000000..27e178d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/hdfs_client.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+
+
+class HdfsClient(Dummy):
+  """
+  Dummy script that simulates a client component.
+  """
+
+  def __init__(self):
+    super(HdfsClient, self).__init__()
+    self.component_name = "FAKEHDFS_CLIENT"
+
+if __name__ == "__main__":
+  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/journalnode.py
new file mode 100644
index 0000000..b24756c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/journalnode.py
@@ -0,0 +1,58 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.logger import Logger
+
+
+class FAKEJournalNode(Dummy):
+  """
+  Dummy script that simulates a master component.
+  """
+
+  def __init__(self):
+    super(FAKEJournalNode, self).__init__()
+    self.component_name = "FAKEJOURNALNODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.journalnode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.journalnode.keytab.file"
+
+  def get_component_name(self):
+    return "hadoop-hdfs-journalnode"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing FAKEJournalNode Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      stack_select.select(self.get_component_name(), params.version)
+
+if __name__ == "__main__":
+  FAKEJournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/namenode.py
new file mode 100644
index 0000000..27720da
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/namenode.py
@@ -0,0 +1,79 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+import json
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.logger import Logger
+
+
+class FAKEHNameNode(Dummy):
+  """
+  Dummy script that simulates a master component.
+  """
+
+  def __init__(self):
+    super(FAKEHNameNode, self).__init__()
+    self.component_name = "FAKENAMENODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.namenode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.namenode.keytab.file"
+
+  def rebalancehdfs(self, env):
+    print "Rebalance FAKEHDFS"
+
+    threshold = 10
+    if "namenode" in self.config["commandParams"]:
+      name_node_params = self.config["commandParams"]["namenode"]
+      if name_node_params is not None:
+        name_node_parameters = json.loads(name_node_params)
+        threshold = name_node_parameters['threshold']
+
+    print "Threshold: %s" % str(threshold)
+
+  def decommission(self):
+    print "Rebalance FAKEHDFS"
+
+  def get_component_name(self):
+    return "hadoop-hdfs-namenode"
+
+  def finalize_non_rolling_upgrade(self, env):
+    pass
+
+  def finalize_rolling_upgrade(self, env):
+    pass
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing FAKEHNameNode Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      stack_select.select(self.get_component_name(), params.version)
+
+if __name__ == "__main__":
+  FAKEHNameNode().execute()

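rebalancehdfs() above also documents the custom-command payload by example: commandParams/namenode arrives as a JSON string whose threshold field overrides the default of 10. A self-contained illustration with an invented payload:

    import json

    # Trimmed stand-in for self.config as the rebalance command would see it.
    config = {"commandParams": {"namenode": json.dumps({"threshold": 20})}}

    threshold = 10  # default when the caller sends no payload
    name_node_params = config["commandParams"].get("namenode")
    if name_node_params is not None:
        threshold = json.loads(name_node_params)["threshold"]

    print("Threshold: %s" % threshold)  # -> Threshold: 20
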
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/nfsgateway.py
new file mode 100644
index 0000000..f40bde3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/nfsgateway.py
@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+
+
+class FAKENFSGateway(Dummy):
+  """
+  Dummy script that simulates a slave component.
+  """
+
+  def __init__(self):
+    super(FAKENFSGateway, self).__init__()
+    self.component_name = "FAKENFS_GATEWAY"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "nfs.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "nfs.keytab.file"
+
+if __name__ == "__main__":
+  FAKENFSGateway().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/params.py
new file mode 100644
index 0000000..8068441
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/params.py
@@ -0,0 +1,33 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.get_architecture import get_architecture
+
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+architecture = get_architecture()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
+version = default("/commandParams/version", None)
\ No newline at end of file

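params.py leans on the default() helper, which walks a '/'-separated path through the command JSON and returns the fallback when any segment is missing. A rough, illustrative equivalent (not the library implementation):

    def default_lookup(command_json, path, fallback=None):
        # Walk "/hostLevelParams/stack_name"-style paths one segment at a time.
        node = command_json
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    command_json = {"hostLevelParams": {"stack_name": "PERF"},
                    "commandParams": {}}
    print(default_lookup(command_json, "/hostLevelParams/stack_name"))  # PERF
    print(default_lookup(command_json, "/commandParams/version"))       # None
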
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/service_check.py
new file mode 100644
index 0000000..270b082
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/service_check.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+
+class ServiceCheck(Script):
+
+  def service_check(self, env):
+    print "Service Check"
+
+if __name__ == "__main__":
+  ServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/snamenode.py
new file mode 100644
index 0000000..b85f9b1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/snamenode.py
@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+
+
+class SFAKEHNameNode(Dummy):
+  """
+  Dummy script that simulates a slave component.
+  """
+
+  def __init__(self):
+    super(SFAKEHNameNode, self).__init__()
+    self.component_name = "SECONDARY_FAKENAMENODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.secondary.namenode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.secondary.namenode.keytab.file"
+
+if __name__ == "__main__":
+  SFAKEHNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/zkfc_slave.py
new file mode 100644
index 0000000..66e36ad
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/scripts/zkfc_slave.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+
+
+class ZkfcSlave(Dummy):
+  """
+  Dummy script that simulates a slave component.
+  """
+
+  def __init__(self):
+    super(ZkfcSlave, self).__init__()
+    self.component_name = "FAKEZKFC"
+
+if __name__ == "__main__":
+  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/quicklinks/quicklinks.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/quicklinks/quicklinks.json
new file mode 100644
index 0000000..4989c05
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/quicklinks/quicklinks.json
@@ -0,0 +1,76 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"dfs.http.policy",
+          "desired":"HTTPS_ONLY",
+          "site":"hdfs-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "namenode_ui",
+        "label": "FAKEHNameNode UI",
+        "url":"%@://%@:%@",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "namenode_logs",
+        "label": "FAKEHNameNode Logs",
+        "url":"%@://%@:%@/logs",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "namenode_jmx",
+        "label": "FAKEHNameNode JMX",
+        "url":"%@://%@:%@/jmx",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "Thread Stacks",
+        "label": "Thread Stacks",
+        "url":"%@://%@:%@/stacks",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

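The quick-link URLs above are templates: Ambari Web substitutes protocol, host, and port into "%@://%@:%@". The protocol flips to https only when the dfs.http.policy check matches, and the port is extracted from the corresponding address property with the declared regex, falling back to the default port. A hypothetical resolution of the namenode_ui link (the host name and address value are invented; the real UI supplies the component's host):

    import re

    hdfs_site = {"dfs.http.policy": "HTTPS_ONLY",
                 "dfs.namenode.https-address": "perf-host-1:50470"}
    host = "perf-host-1"

    https = hdfs_site.get("dfs.http.policy") == "HTTPS_ONLY"
    protocol = "https" if https else "http"
    prop = "dfs.namenode.https-address" if https \
        else "dfs.namenode.http-address"
    default_port = "50470" if https else "50070"

    match = re.search(r"\w*:(\d+)", hdfs_site.get(prop, ""))
    port = match.group(1) if match else default_port

    print("%s://%s:%s" % (protocol, host, port))  # https://perf-host-1:50470
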
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/themes/theme.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/themes/theme.json
new file mode 100644
index 0000000..499f300
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/themes/theme.json
@@ -0,0 +1,179 @@
+{
+  "name": "default",
+  "description": "Default theme for FAKEHDFS service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-namenode",
+                  "display-name": "FAKEHNameNode",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-namenode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-datanode",
+                  "display-name": "FAKEDataNode",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-datanode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hdfs-site/dfs.namenode.name.dir",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hadoop-env/namenode_heapsize",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.handler.count",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.data.dir",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hadoop-env/dtnode_heapsize",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+          "subsection-name": "subsection-datanode-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hdfs-site/dfs.namenode.name.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.handler.count",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hadoop-env/namenode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.data.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hadoop-env/dtnode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
+

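The theme addresses properties the same way the alert tokens and quick links do: a "site/property" pair, where the site names a config type (hdfs-site, hadoop-env) and the property a key within it. A one-line illustration of splitting a placement entry (variable names are made up):

    entry = {"config": "hdfs-site/dfs.namenode.handler.count",
             "subsection-name": "subsection-namenode-col1"}
    site, prop = entry["config"].split("/", 1)
    print("%s -> %s" % (site, prop))  # hdfs-site -> dfs.namenode.handler.count
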
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/widgets.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/widgets.json
new file mode 100644
index 0000000..7a793f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/widgets.json
@@ -0,0 +1,649 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_hdfs_dashboard",
+      "display_name": "Standard FAKEHDFS Dashboard",
+      "section_name": "FAKEHDFS_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "FAKEHNameNode GC count",
+          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcCount._rate",
+              "metric_path": "metrics/jvm/gcCount._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
+              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC total count",
+              "value": "${jvm.JvmMetrics.GcCount._rate}"
+            },
+            {
+              "name": "GC count of type major collection",
+              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "FAKEHNameNode GC time",
+          "description": "Total time taken by major type garbage collections in milliseconds.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
+              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC time in major collection",
+              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NN Connection Load",
+          "description": "Number of open RPC connections being managed by FAKEHNameNode.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.client.NumOpenConnections",
+              "metric_path": "metrics/rpc/client/NumOpenConnections",
+              "category": "",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.NumOpenConnections",
+              "metric_path": "metrics/rpc/datanode/NumOpenConnections",
+              "category": "",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Open Client Connections",
+              "value": "${rpc.rpc.client.NumOpenConnections}"
+            },
+            {
+              "name": "Open Datanode Connections",
+              "value": "${rpc.rpc.datanode.NumOpenConnections}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "FAKEHNameNode Heap",
+          "description": "Heap memory committed and Heap memory used with respect to time.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "JVM heap committed",
+              "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
+            },
+            {
+              "name": "JVM heap used",
+              "value": "${jvm.JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "FAKEHNameNode Host Load",
+          "description": "Percentage of CPU and Memory resources being consumed on FAKEHNameNode host.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system",
+              "metric_path": "metrics/cpu/cpu_system",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_user",
+              "metric_path": "metrics/cpu/cpu_user",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_nice",
+              "metric_path": "metrics/cpu/cpu_nice",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_idle",
+              "metric_path": "metrics/cpu/cpu_idle",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"
+            },
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total - mem_free)/mem_total) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        },
+        {
+          "widget_name": "FAKEHNameNode RPC",
+          "description": "Compares the average time spent for RPC request in a queue and RPC request being processed.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.client.RpcQueueTimeAvgTime",
+              "metric_path": "metrics/rpc/client/RpcQueueTime_avg_time",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.client.RpcProcessingTimeAvgTime",
+              "metric_path": "metrics/rpc/client/RpcProcessingTime_avg_time",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
+              "metric_path": "metrics/rpc/datanode/RpcQueueTime_avg_time",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
+              "metric_path": "metrics/rpc/datanode/RpcProcessingTime_avg_time",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Client RPC Queue Wait time",
+              "value": "${rpc.rpc.client.RpcQueueTimeAvgTime}"
+            },
+            {
+              "name": "Client RPC Processing time",
+              "value": "${rpc.rpc.client.RpcProcessingTimeAvgTime}"
+            },
+            {
+              "name": "Datanode RPC Queue Wait time",
+              "value": "${rpc.rpc.datanode.RpcQueueTimeAvgTime}"
+            },
+            {
+              "name": "Datanode RPC Processing time",
+              "value": "${rpc.rpc.datanode.RpcProcessingTimeAvgTime}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "ms"
+          }
+        },
+        {
+          "widget_name": "FAKEHNameNode Operations",
+          "description": "Rate per second of number of file operation over time.",
+          "widget_type": "GRAPH",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.namenode.TotalFileOps._rate",
+              "metric_path": "metrics/dfs/namenode/TotalFileOps._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "FAKEHNameNode File Operations",
+              "value": "${dfs.namenode.TotalFileOps._rate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Failed disk volumes",
+          "description": "Number of failed disk volumes across all FAKEDataNodes. It is indicative of bad FAKEHDFS health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum",
+              "metric_path": "metrics/dfs/datanode/NumFailedVolumes",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Failed disk volumes",
+              "value": "${FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum}"
+            }
+          ],
+          "properties": {
+            "display_unit": ""
+          }
+        },
+        {
+          "widget_name": "Blocks With Corrupted Replicas",
+          "description": "Number of data blocks with at least one corrupted replica (but not all replicas). It is indicative of bad FAKEHDFS health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=FAKEHNameNode,name=FSNamesystem.CorruptBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Blocks With Corrupted Replicas",
+              "value": "${Hadoop:service=FAKEHNameNode,name=FSNamesystem.CorruptBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "Under Replicated Blocks",
+          "description": "Number of file blocks that do not meet the replication factor criteria. It is indicative of bad FAKEHDFS health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=FAKEHNameNode,name=FSNamesystem.UnderReplicatedBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKENAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Under Replicated Blocks",
+              "value": "${Hadoop:service=FAKEHNameNode,name=FSNamesystem.UnderReplicatedBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "FAKEHDFS Space Utilization",
+          "description": "Percentage of total DFS capacity in use.",
+          "widget_type": "GAUGE",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
+              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "FAKEHDFS Space Utilization",
+              "value": "${(FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0.75",
+            "error_threshold": "0.9"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_hdfs_heatmap",
+      "section_name": "FAKEHDFS_HEATMAPS",
+      "display_name": "FAKEHDFS Heatmaps",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "FAKEHDFS Bytes Read",
+          "default_section_name": "FAKEHDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "FAKEHDFS Bytes Read",
+              "value": "${dfs.datanode.BytesRead._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "FAKEHDFS Bytes Written",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "FAKEHDFS Bytes Written",
+              "value": "${dfs.datanode.BytesWritten._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "FAKEDataNode Garbage Collection Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=FAKEDataNode,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "FAKEDataNode Garbage Collection Time",
+              "value": "${Hadoop:service=FAKEDataNode,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "FAKEDataNode JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=FAKEDataNode,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "FAKEDataNode JVM Heap Memory Used",
+              "value": "${Hadoop:service=FAKEDataNode,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "FAKEDataNode JVM Heap Memory Committed",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=FAKEDataNode,name=JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "FAKEDataNode JVM Heap Memory Committed",
+              "value": "${Hadoop:service=FAKEDataNode,name=JvmMetrics.MemHeapCommittedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "FAKEDataNode Process Disk I/O Utilization",
+          "default_section_name": "FAKEHDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            },
+            {
+              "name": "dfs.datanode.BytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalReadTime._rate",
+              "metric_path": "metrics/dfs/datanode/TotalReadTime._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalWriteTime._rate",
+              "metric_path": "metrics/dfs/datanode/TotalWriteTime._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "FAKEDataNode Process Disk I/O Utilization",
+              "value": "${((dfs.datanode.BytesRead._rate/dfs.datanode.TotalReadTime._rate)+(dfs.datanode.BytesWritten._rate/dfs.datanode.TotalWriteTime._rate))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "FAKEDataNode Process Network I/O Utilization",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.RemoteBytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/RemoteBytesRead._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            },
+            {
+              "name": "dfs.datanode.ReadsFromRemoteClient._rate",
+              "metric_path": "metrics/dfs/datanode/reads_from_remote_client._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            },
+            {
+              "name": "dfs.datanode.RemoteBytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/RemoteBytesWritten._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            },
+            {
+              "name": "dfs.datanode.WritesFromRemoteClient._rate",
+              "metric_path": "metrics/dfs/datanode/writes_from_remote_client._rate",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "FAKEDataNode Process Network I/O Utilization",
+              "value": "${((dfs.datanode.RemoteBytesRead._rate/dfs.datanode.ReadsFromRemoteClient._rate)+(dfs.datanode.RemoteBytesWritten._rate/dfs.datanode.WritesFromRemoteClient._rate))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "FAKEHDFS Space Utilization",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
+              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "FAKEHDFS",
+              "component_name": "FAKEDATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "FAKEHDFS Space Utilization",
+              "value": "${((FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
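
The "value" fields in the widget definitions above are template expressions: at render time the widget framework substitutes each metric name with its current value and evaluates the remaining arithmetic, e.g. "${((mem_total - mem_free)/mem_total) * 100}". A minimal sketch of that substitution step in plain Python follows; the evaluate_widget_value helper and the sample capacity numbers are illustrative assumptions, not the actual Ambari implementation.

def evaluate_widget_value(template, metrics):
    # Strip the surrounding "${...}" wrapper, then replace each metric
    # name with its current numeric value. Names are substituted
    # longest-first so a short name never clobbers part of a longer one.
    expression = template[2:-1]
    for name in sorted(metrics, key=len, reverse=True):
        expression = expression.replace(name, repr(float(metrics[name])))
    # What remains is plain arithmetic; evaluate it with builtins disabled.
    return eval(expression, {"__builtins__": {}})

# Hypothetical sample values for the "FAKEHDFS Space Utilization" heatmap.
CAPACITY = "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity"
REMAINING = "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining"
metrics = {CAPACITY: 1000.0, REMAINING: 250.0}
template = "${((%s - %s)/%s) * 100}" % (CAPACITY, REMAINING, CAPACITY)
print(evaluate_widget_value(template, metrics))  # 75.0

With these sample numbers the heatmap expression yields 75.0 (%); the GAUGE variant of the same widget omits the "* 100" factor, so it produces a 0..1 fraction that lines up with its 0.75 warning and 0.9 error thresholds.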