Posted to commits@ambari.apache.org by al...@apache.org on 2016/12/08 23:10:38 UTC

[03/20] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

http://git-wip-us.apache.org/repos/asf/ambari/blob/3bf5d32d/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json
deleted file mode 100644
index 4a645b0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json
+++ /dev/null
@@ -1,649 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_hdfs_dashboard",
-      "display_name": "Standard HDFS Dashboard",
-      "section_name": "HDFS_SUMMARY",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "NameNode GC count",
-          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "jvm.JvmMetrics.GcCount._rate",
-              "metric_path": "metrics/jvm/gcCount._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
-              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "GC total count",
-              "value": "${jvm.JvmMetrics.GcCount._rate}"
-            },
-            {
-              "name": "GC count of type major collection",
-              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NameNode GC time",
-          "description": "Total time taken by major type garbage collections in milliseconds.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
-              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "GC time in major collection",
-              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NN Connection Load",
-          "description": "Number of open RPC connections being managed by NameNode.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "rpc.rpc.client.NumOpenConnections",
-              "metric_path": "metrics/rpc/client/NumOpenConnections",
-              "category": "",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.datanode.NumOpenConnections",
-              "metric_path": "metrics/rpc/datanode/NumOpenConnections",
-              "category": "",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Open Client Connections",
-              "value": "${rpc.rpc.client.NumOpenConnections}"
-            },
-            {
-              "name": "Open Datanode Connections",
-              "value": "${rpc.rpc.datanode.NumOpenConnections}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NameNode Heap",
-          "description": "Heap memory committed and Heap memory used with respect to time.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "jvm.JvmMetrics.MemHeapCommittedM",
-              "metric_path": "metrics/jvm/memHeapCommittedM",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "jvm.JvmMetrics.MemHeapUsedM",
-              "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "JVM heap committed",
-              "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
-            },
-            {
-              "name": "JVM heap used",
-              "value": "${jvm.JvmMetrics.MemHeapUsedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NameNode Host Load",
-          "description": "Percentage of CPU and Memory resources being consumed on NameNode host.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "cpu_system",
-              "metric_path": "metrics/cpu/cpu_system",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_user",
-              "metric_path": "metrics/cpu/cpu_user",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_nice",
-              "metric_path": "metrics/cpu/cpu_nice",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_idle",
-              "metric_path": "metrics/cpu/cpu_idle",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "CPU utilization",
-              "value": "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"
-            },
-            {
-              "name": "Memory utilization",
-              "value": "${((mem_total - mem_free)/mem_total) * 100}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "%"
-          }
-        },
-        {
-          "widget_name": "NameNode RPC",
-          "description": "Compares the average time spent for RPC request in a queue and RPC request being processed.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "rpc.rpc.client.RpcQueueTimeAvgTime",
-              "metric_path": "metrics/rpc/client/RpcQueueTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.client.RpcProcessingTimeAvgTime",
-              "metric_path": "metrics/rpc/client/RpcProcessingTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
-              "metric_path": "metrics/rpc/datanode/RpcQueueTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
-              "metric_path": "metrics/rpc/datanode/RpcProcessingTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Client RPC Queue Wait time",
-              "value": "${rpc.rpc.client.RpcQueueTimeAvgTime}"
-            },
-            {
-              "name": "Client RPC Processing time",
-              "value": "${rpc.rpc.client.RpcProcessingTimeAvgTime}"
-            },
-            {
-              "name": "Datanode RPC Queue Wait time",
-              "value": "${rpc.rpc.datanode.RpcQueueTimeAvgTime}"
-            },
-            {
-              "name": "Datanode RPC Processing time",
-              "value": "${rpc.rpc.datanode.RpcProcessingTimeAvgTime}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "ms"
-          }
-        },
-        {
-          "widget_name": "NameNode Operations",
-          "description": "Rate per second of number of file operation over time.",
-          "widget_type": "GRAPH",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.namenode.TotalFileOps._rate",
-              "metric_path": "metrics/dfs/namenode/TotalFileOps._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "NameNode File Operations",
-              "value": "${dfs.namenode.TotalFileOps._rate}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Failed disk volumes",
-          "description": "Number of Failed disk volumes across all DataNodes. Its indicative of HDFS bad health.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum",
-              "metric_path": "metrics/dfs/datanode/NumFailedVolumes",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Failed disk volumes",
-              "value": "${FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum}"
-            }
-          ],
-          "properties": {
-            "display_unit": ""
-          }
-        },
-        {
-          "widget_name": "Blocks With Corrupted Replicas",
-          "description": "Number represents data blocks with at least one corrupted replica (but not all of them). Its indicative of HDFS bad health.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
-              "metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Blocks With Corrupted Replicas",
-              "value": "${Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks}"
-            }
-          ],
-          "properties": {
-            "warning_threshold": "0",
-            "error_threshold": "50"
-          }
-        },
-        {
-          "widget_name": "Under Replicated Blocks",
-          "description": "Number represents file blocks that does not meet the replication factor criteria. Its indicative of HDFS bad health.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
-              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Under Replicated Blocks",
-              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
-            }
-          ],
-          "properties": {
-            "warning_threshold": "0",
-            "error_threshold": "50"
-          }
-        },
-        {
-          "widget_name": "HDFS Space Utilization",
-          "description": "Percentage of available space used in the DFS.",
-          "widget_type": "GAUGE",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
-              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
-              "metric_path": "metrics/dfs/datanode/Capacity",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Space Utilization",
-              "value": "${(FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity}"
-            }
-          ],
-          "properties": {
-            "warning_threshold": "0.75",
-            "error_threshold": "0.9"
-          }
-        }
-      ]
-    },
-    {
-      "layout_name": "default_hdfs_heatmap",
-      "section_name": "HDFS_HEATMAPS",
-      "display_name": "HDFS Heatmaps",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "HDFS Bytes Read",
-          "default_section_name": "HDFS_HEATMAPS",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "dfs.datanode.BytesRead._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Bytes Read",
-              "value": "${dfs.datanode.BytesRead._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "1024"
-          }
-        },
-        {
-          "widget_name": "HDFS Bytes Written",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.datanode.BytesWritten._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Bytes Written",
-              "value": "${dfs.datanode.BytesWritten._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "1024"
-          }
-        },
-        {
-          "widget_name": "DataNode Garbage Collection Time",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis",
-              "metric_path": "metrics/jvm/gcTimeMillis",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode Garbage Collection Time",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "max_limit": "10000"
-          }
-        },
-        {
-          "widget_name": "DataNode JVM Heap Memory Used",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
-              "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode JVM Heap Memory Used",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "512"
-          }
-        },
-        {
-          "widget_name": "DataNode JVM Heap Memory Committed",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM",
-              "metric_path": "metrics/jvm/memHeapCommittedM",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode JVM Heap Memory Committed",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "512"
-          }
-        },
-        {
-          "widget_name": "DataNode Process Disk I/O Utilization",
-          "default_section_name": "HDFS_HEATMAPS",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.datanode.BytesRead._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.BytesWritten._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.TotalReadTime._rate",
-              "metric_path": "metrics/dfs/datanode/TotalReadTime._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.TotalWriteTime._rate",
-              "metric_path": "metrics/dfs/datanode/TotalWriteTime._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode Process Disk I/O Utilization",
-              "value": "${((dfs.datanode.BytesRead._rate/dfs.datanode.TotalReadTime._rate)+(dfs.datanode.BytesWritten._rate/dfs.datanode.TotalWriteTime._rate))*50}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "DataNode Process Network I/O Utilization",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.datanode.RemoteBytesRead._rate",
-              "metric_path": "metrics/dfs/datanode/RemoteBytesRead._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.ReadsFromRemoteClient._rate",
-              "metric_path": "metrics/dfs/datanode/reads_from_remote_client._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.RemoteBytesWritten._rate",
-              "metric_path": "metrics/dfs/datanode/RemoteBytesWritten._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.WritesFromRemoteClient._rate",
-              "metric_path": "metrics/dfs/datanode/writes_from_remote_client._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode Process Network I/O Utilization",
-              "value": "${((dfs.datanode.RemoteBytesRead._rate/dfs.datanode.ReadsFromRemoteClient._rate)+(dfs.datanode.RemoteBytesWritten._rate/dfs.datanode.WritesFromRemoteClient._rate))*50}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "HDFS Space Utilization",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
-              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
-              "metric_path": "metrics/dfs/datanode/Capacity",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Space Utilization",
-              "value": "${((FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}
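
Each widget above pairs a "metrics" list with a "values" expression such as
"${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}",
i.e. plain arithmetic over the declared metric names. As a rough sketch of how such an
expression resolves once metric samples are in hand (the sample values and the eval-based
toy evaluator below are illustrative assumptions, not Ambari's actual expression engine):

    # Toy evaluator for a widget "value" expression; illustrative only.
    # The sample metric values are made up; Ambari's real expression engine differs.
    def evaluate_widget_expression(expression, samples):
        inner = expression.strip()
        if inner.startswith("${") and inner.endswith("}"):
            inner = inner[2:-1]
        # Substitute longer metric names first so shorter prefixes do not clobber them.
        for name in sorted(samples, key=len, reverse=True):
            inner = inner.replace(name, repr(float(samples[name])))
        return eval(inner)  # acceptable in a sketch; a real evaluator would parse safely

    samples = {"cpu_system": 5.0, "cpu_user": 20.0, "cpu_nice": 0.0,
               "cpu_idle": 70.0, "cpu_wio": 5.0}
    expr = ("${((cpu_system + cpu_user + cpu_nice)/"
            "(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}")
    print(evaluate_widget_expression(expr, samples))  # -> 25.0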

http://git-wip-us.apache.org/repos/asf/ambari/blob/3bf5d32d/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json
deleted file mode 100644
index 9000e95..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json
+++ /dev/null
@@ -1,246 +0,0 @@
-{
-  "services": [
-    {
-      "name": "HDFS",
-      "identities": [
-        {
-          "name": "/spnego",
-          "principal": {
-            "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal"
-          },
-          "keytab": {
-            "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab"
-          }
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "auth_to_local_properties" : [
-        "core-site/hadoop.security.auth_to_local"
-      ],
-      "configurations": [
-        {
-          "core-site": {
-            "hadoop.security.authentication": "kerberos",
-            "hadoop.security.authorization": "true",
-            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
-          }
-        },
-        {
-          "ranger-hdfs-audit": {
-            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
-            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
-            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
-            "xasecure.audit.jaas.Client.option.storeKey": "false",
-            "xasecure.audit.jaas.Client.option.serviceName": "solr",
-            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name":  "HDFS_CLIENT",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        },
-        {
-          "name": "NAMENODE",
-          "identities": [
-            {
-              "name": "hdfs",
-              "principal": {
-                "value": "${hadoop-env/hdfs_user}-${cluster_name|toLower()}@${realm}",
-                "type" : "user" ,
-                "configuration": "hadoop-env/hdfs_principal_name",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/hdfs.headless.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hadoop-env/hdfs_user_keytab"
-              }
-            },
-            {
-              "name": "namenode_nn",
-              "principal": {
-                "value": "nn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nn.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/dfs.namenode.keytab.file"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/namenode_nn",
-              "principal": {
-                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.principal"                
-              },
-              "keytab": {
-                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.keyTab"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "hdfs-site": {
-                "dfs.block.access.token.enable": "true"
-              }
-            }
-          ]
-        },
-        {
-          "name": "DATANODE",
-          "identities": [
-            {
-              "name": "datanode_dn",
-              "principal": {
-                "value": "dn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/dn.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/dfs.datanode.keytab.file"
-              }
-            }
-          ],
-          "configurations" : [
-            {
-              "hdfs-site" : {
-                "dfs.datanode.address" : "0.0.0.0:1019",
-                "dfs.datanode.http.address": "0.0.0.0:1022"
-              }
-            }
-          ]
-        },
-        {
-          "name": "SECONDARY_NAMENODE",
-          "identities": [
-            {
-              "name": "secondary_namenode_nn",
-              "principal": {
-                "value": "nn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nn.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal"
-              }
-            }
-          ]
-        },
-        {
-          "name": "NFS_GATEWAY",
-          "identities": [
-            {
-              "name": "nfsgateway",
-              "principal": {
-                "value": "nfs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/nfs.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nfs.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/nfs.keytab.file"
-              }
-            }
-          ]
-        },
-        {
-          "name": "JOURNALNODE",
-          "identities": [
-            {
-              "name": "journalnode_jn",
-              "principal": {
-                "value": "jn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jn.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/dfs.journalnode.keytab.file"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
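
The descriptor above leans on placeholder substitution: values like
"${hadoop-env/hdfs_user}-${cluster_name|toLower()}@${realm}" and "nn/_HOST@${realm}"
are filled in from cluster configuration (${config-type/property}), from variables such
as ${realm} and ${keytab_dir}, and from the host name for _HOST. A minimal sketch of
that kind of substitution, assuming configuration is handed in as a nested dict
(illustrative only, not Ambari's implementation):

    # Illustrative resolution of ${...} placeholders and _HOST in Kerberos descriptors.
    # The config/variable dicts and the |toLower() handling are assumptions.
    import re

    def resolve(value, configs, variables, hostname):
        def repl(match):
            name, _, modifier = match.group(1).partition("|")
            if "/" in name:                      # ${config-type/property}
                config_type, prop = name.split("/", 1)
                resolved = configs[config_type][prop]
            else:                                # ${realm}, ${cluster_name}, ...
                resolved = variables[name]
            return resolved.lower() if modifier == "toLower()" else resolved
        return re.sub(r"\$\{([^}]+)\}", repl, value).replace("_HOST", hostname)

    configs = {"hadoop-env": {"hdfs_user": "hdfs"}}
    variables = {"realm": "EXAMPLE.COM", "cluster_name": "MyCluster"}
    print(resolve("${hadoop-env/hdfs_user}-${cluster_name|toLower()}@${realm}",
                  configs, variables, "nn1.example.com"))  # hdfs-mycluster@EXAMPLE.COM
    print(resolve("nn/_HOST@${realm}", configs, variables, "nn1.example.com"))
    # nn/nn1.example.com@EXAMPLE.COM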

http://git-wip-us.apache.org/repos/asf/ambari/blob/3bf5d32d/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
index 30c49c7..ef2027f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
@@ -21,39 +21,8 @@
     <service>
       <name>HDFS</name>
       <displayName>HDFS</displayName>
-      <comment>Apache Hadoop Distributed File System</comment>
-      <version>2.7.1.3.0</version>
-      <extends>common-services/HDFS/2.1.0.2.0</extends>
-
-      <components>
-        <!-- NFS Gateway was added in HDP 2.3. -->
-        <component>
-          <name>NFS_GATEWAY</name>
-          <displayName>NFSGateway</displayName>
-          <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/nfsgateway.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-        </component>
-
-        <component>
-          <name>ZKFC</name>
-          <versionAdvertised>true</versionAdvertised>
-        </component>
-      </components>
+      <version>3.0.0.3.0</version>
+      <extends>common-services/HDFS/3.0.0</extends>
 
       <osSpecifics>
         <osSpecific>
@@ -171,20 +140,6 @@
         </osSpecific>
       </osSpecifics>
 
-      <quickLinksConfigurations>
-        <quickLinksConfiguration>
-          <fileName>quicklinks.json</fileName>
-          <default>true</default>
-        </quickLinksConfiguration>
-      </quickLinksConfigurations>
-
-      <themes>
-        <theme>
-          <fileName>theme.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3bf5d32d/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json
deleted file mode 100644
index 5318ba0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,80 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"https",
-      "checks":[
-        {
-          "property":"dfs.http.policy",
-          "desired":"HTTPS_ONLY",
-          "site":"hdfs-site"
-        }
-      ]
-    },
-
-    "links": [
-      {
-        "name": "namenode_ui",
-        "label": "NameNode UI",
-        "component_name": "NAMENODE",
-        "url":"%@://%@:%@",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      },
-      {
-        "name": "namenode_logs",
-        "label": "NameNode Logs",
-        "component_name": "NAMENODE",
-        "url":"%@://%@:%@/logs",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      },
-      {
-        "name": "namenode_jmx",
-        "label": "NameNode JMX",
-        "component_name": "NAMENODE",
-        "url":"%@://%@:%@/jmx",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      },
-      {
-        "name": "Thread Stacks",
-        "label": "Thread Stacks",
-        "component_name": "NAMENODE",
-        "url":"%@://%@:%@/stacks",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file
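
Each quick link above selects https only when dfs.http.policy is HTTPS_ONLY, then fills
the "%@://%@:%@" template with a port pulled out of dfs.namenode.http-address /
dfs.namenode.https-address via the regex "\w*:(\d+)", falling back to the listed default
port. A small sketch of that port extraction, with assumed hdfs-site address values:

    # Illustrative port extraction matching the quicklinks regex "\w*:(\d+)".
    # The address strings are assumed example values.
    import re

    def quicklink_port(address, regex=r"\w*:(\d+)", default_port="50070"):
        match = re.search(regex, address)
        return match.group(1) if match else default_port

    print(quicklink_port("namenode.example.com:50070"))           # 50070
    print(quicklink_port("0.0.0.0:50470", default_port="50470"))  # 50470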

http://git-wip-us.apache.org/repos/asf/ambari/blob/3bf5d32d/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json
deleted file mode 100644
index 6f2b797..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json
+++ /dev/null
@@ -1,179 +0,0 @@
-{
-  "name": "default",
-  "description": "Default theme for HDFS service",
-  "configuration": {
-    "layouts": [
-      {
-        "name": "default",
-        "tabs": [
-          {
-            "name": "settings",
-            "display-name": "Settings",
-            "layout": {
-              "tab-columns": "2",
-              "tab-rows": "1",
-              "sections": [
-                {
-                  "name": "section-namenode",
-                  "display-name": "NameNode",
-                  "row-index": "0",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-namenode-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "section-datanode",
-                  "display-name": "DataNode",
-                  "row-index": "0",
-                  "column-index": "1",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-datanode-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ],
-    "placement": {
-      "configuration-layout": "default",
-      "configs": [
-        {
-          "config": "hdfs-site/dfs.namenode.name.dir",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hadoop-env/namenode_heapsize",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.namenode.handler.count",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.datanode.data.dir",
-          "subsection-name": "subsection-datanode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
-          "subsection-name": "subsection-datanode-col1"
-        },
-        {
-          "config": "hadoop-env/dtnode_heapsize",
-          "subsection-name": "subsection-datanode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.datanode.max.transfer.threads",
-          "subsection-name": "subsection-datanode-col1"
-        }
-      ]
-    },
-    "widgets": [
-      {
-        "config": "hdfs-site/dfs.namenode.name.dir",
-        "widget": {
-          "type": "directories"
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "percent"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.namenode.handler.count",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hadoop-env/namenode_heapsize",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.datanode.data.dir",
-        "widget": {
-          "type": "directories"
-        }
-      },
-      {
-        "config": "hadoop-env/dtnode_heapsize",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.datanode.max.transfer.threads",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      }
-    ]
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3bf5d32d/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json
deleted file mode 100644
index 782f21d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json
+++ /dev/null
@@ -1,670 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_yarn_dashboard",
-      "display_name": "Standard YARN Dashboard",
-      "section_name": "YARN_SUMMARY",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Memory Utilization",
-          "description": "Percentage of total memory allocated to containers running in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AllocatedMB",
-              "metric_path": "metrics/yarn/Queue/root/AllocatedMB",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AvailableMB",
-              "metric_path": "metrics/yarn/Queue/root/AvailableMB",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Memory Utilization",
-              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedMB / (yarn.QueueMetrics.Queue=root.AllocatedMB + yarn.QueueMetrics.Queue=root.AvailableMB)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "CPU Utilization",
-          "description": "Percentage of total virtual cores allocated to containers running in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AllocatedVCores",
-              "metric_path": "metrics/yarn/Queue/root/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AvailableVCores",
-              "metric_path": "metrics/yarn/Queue/root/AvailableVCores",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable CPU Utilized across NodeManager",
-              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedVCores / (yarn.QueueMetrics.Queue=root.AllocatedVCores + yarn.QueueMetrics.Queue=root.AvailableVCores)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Bad Local Disks",
-          "description": "Number of unhealthy local disks across all NodeManagers.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.BadLocalDirs",
-              "metric_path": "metrics/yarn/BadLocalDirs",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.BadLogDirs",
-              "metric_path": "metrics/yarn/BadLogDirs",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Number of unhealthy local disks for NodeManager",
-              "value": "${yarn.NodeManagerMetrics.BadLocalDirs + yarn.NodeManagerMetrics.BadLogDirs}"
-            }
-          ],
-          "properties": {
-            "display_unit": ""
-          }
-        },
-        {
-          "widget_name": "Container Failures",
-          "description": "Percentage of all containers failing in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
-              "metric_path": "metrics/yarn/ContainersFailed._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
-              "metric_path": "metrics/yarn/ContainersCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
-              "metric_path": "metrics/yarn/ContainersLaunched._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersIniting._sum",
-              "metric_path": "metrics/yarn/ContainersIniting._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
-              "metric_path": "metrics/yarn/ContainersKilled._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersRunning._sum",
-              "metric_path": "metrics/yarn/ContainersRunning._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Container Failures",
-              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting._sum + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning._sum)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "App Failures",
-          "description": "Percentage of all launched applications failing in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsFailed._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsFailed._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsKilled._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsKilled._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
-              "metric_path": "metrics/yarn/Queue/root/AppsPending",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsRunning",
-              "metric_path": "metrics/yarn/Queue/root/AppsRunning",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsSubmitted._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsSubmitted._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsCompleted._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "App Failures",
-              "value": "${(yarn.QueueMetrics.Queue=root.AppsFailed._rate/(yarn.QueueMetrics.Queue=root.AppsFailed._rate + yarn.QueueMetrics.Queue=root.AppsKilled._rate + yarn.QueueMetrics.Queue=root.AppsPending + yarn.QueueMetrics.Queue=root.AppsRunning + yarn.QueueMetrics.Queue=root.AppsSubmitted._rate + yarn.QueueMetrics.Queue=root.AppsCompleted._rate)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Pending Apps",
-          "description": "Count of applications waiting for cluster resources to become available.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
-              "metric_path": "metrics/yarn/Queue/root/AppsPending",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Pending Apps",
-              "value": "${yarn.QueueMetrics.Queue=root.AppsPending}"
-            }
-          ],
-          "properties": {
-            "display_unit": "Apps",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Memory",
-          "description": "Percentage of memory used across all NodeManager hosts.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "mem_total._sum",
-              "metric_path": "metrics/memory/mem_total._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "mem_free._sum",
-              "metric_path": "metrics/memory/mem_free._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Memory utilization",
-              "value": "${((mem_total._sum - mem_free._sum)/mem_total._sum) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Disk",
-          "description": "Sum of disk throughput for all NodeManager hosts.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "read_bps._sum",
-              "metric_path": "metrics/disk/read_bps._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "write_bps._sum",
-              "metric_path": "metrics/disk/write_bps._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Read throughput",
-              "value": "${read_bps._sum/1048576}"
-            },
-            {
-              "name": "Write throughput",
-              "value": "${write_bps._sum/1048576}"
-            }
-          ],
-          "properties": {
-            "display_unit": "Mbps",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Network",
-          "description": "Average of Network utilized across all NodeManager hosts.",
-          "default_section_name": "YARN_SUMMARY",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "pkts_in._avg",
-              "metric_path": "metrics/network/pkts_in._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "pkts_out._avg",
-              "metric_path": "metrics/network/pkts_out._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Packets In",
-              "value": "${pkts_in._avg}"
-            },
-            {
-              "name": "Packets Out",
-              "value": "${pkts_out._avg}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster CPU",
-          "description": "Percentage of CPU utilized across all NodeManager hosts.",
-          "default_section_name": "YARN_SUMMARY",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "cpu_system._sum",
-              "metric_path": "metrics/cpu/cpu_system._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_user._sum",
-              "metric_path": "metrics/cpu/cpu_user._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_nice._sum",
-              "metric_path": "metrics/cpu/cpu_nice._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_idle._sum",
-              "metric_path": "metrics/cpu/cpu_idle._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_wio._sum",
-              "metric_path": "metrics/cpu/cpu_wio._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "CPU utilization",
-              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "%"
-          }
-        }
-      ]
-    },
-    {
-      "layout_name": "default_yarn_heatmap",
-      "display_name": "YARN Heatmaps",
-      "section_name": "YARN_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "YARN local disk space utilization per NodeManager",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
-              "metric_path": "metrics/yarn/GoodLocalDirsDiskUtilizationPerc",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
-              "metric_path": "metrics/yarn/GoodLogDirsDiskUtilizationPerc",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "YARN local disk space utilization per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc + yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc)/2}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Total Allocatable RAM Utilized per NodeManager",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedGB",
-              "metric_path": "metrics/yarn/AllocatedGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.AvailableGB",
-              "metric_path": "metrics/yarn/AvailableGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable RAM Utilized per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.AllocatedGB/(yarn.NodeManagerMetrics.AvailableGB + yarn.NodeManagerMetrics.AllocatedGB)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Total Allocatable CPU Utilized per NodeManager",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "metric_path": "metrics/yarn/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.AvailableVCores",
-              "metric_path": "metrics/yarn/AvailableVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable CPU Utilized per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.AllocatedVCores/(yarn.NodeManagerMetrics.AllocatedVCores + yarn.NodeManagerMetrics.AvailableVCores)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Container Failures",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
-              "metric_path": "metrics/yarn/ContainersFailed._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
-              "metric_path": "metrics/yarn/ContainersCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
-              "metric_path": "metrics/yarn/ContainersLaunched._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersIniting",
-              "metric_path": "metrics/yarn/ContainersIniting",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
-              "metric_path": "metrics/yarn/ContainersKilled._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersRunning",
-              "metric_path": "metrics/yarn/ContainersRunning",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Container Failures",
-              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager GC Time",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
-              "metric_path": "metrics/jvm/gcTimeMillis",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager Garbage Collection Time",
-              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "max_limit": "10000"
-          }
-        },
-        {
-          "widget_name": "NodeManager JVM Heap Memory Used",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
-              "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager JVM Heap Memory Used",
-              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "512"
-          }
-        },
-        {
-          "widget_name": "Allocated Containers",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedContainers",
-              "metric_path": "metrics/yarn/AllocatedContainers",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Allocated Containers",
-              "value": "${yarn.NodeManagerMetrics.AllocatedContainers}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager RAM Utilized",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedGB",
-              "metric_path": "metrics/yarn/AllocatedGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager RAM Utilized",
-              "value": "${yarn.NodeManagerMetrics.AllocatedGB}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager CPU Utilized",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "metric_path": "metrics/yarn/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager CPU Utilized",
-              "value": "${yarn.NodeManagerMetrics.AllocatedVCores}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}
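
Note on the widget definitions deleted above: each widget computes its displayed value from Ambari Metrics series using a small arithmetic expression over aggregated metric names, for example the Cluster Memory widget's ((mem_total._sum - mem_free._sum)/mem_total._sum) * 100. The sketch below only illustrates how such an expression can be evaluated once the aggregated values are known; it is not Ambari's widget engine, and the sample metric values are invented.

    # Minimal sketch: evaluate an Ambari-style widget value expression by
    # substituting already-aggregated metric values. Illustration only; the
    # sample numbers are invented and this is not Ambari's implementation.
    def evaluate_widget_expression(expression, metrics):
        body = expression.strip()
        # Strip the ${...} wrapper used in widgets.json "values" entries.
        if body.startswith("${") and body.endswith("}"):
            body = body[2:-1]
        # Replace each metric name with its numeric value, longest names first
        # so that e.g. "mem_total._sum" is not clobbered by a shorter prefix.
        for name in sorted(metrics, key=len, reverse=True):
            body = body.replace(name, repr(float(metrics[name])))
        # What remains is plain arithmetic; eval is acceptable for a sketch.
        return eval(body, {"__builtins__": {}}, {})

    expr = "${((mem_total._sum - mem_free._sum)/mem_total._sum) * 100}"
    sample = {"mem_total._sum": 256_000.0, "mem_free._sum": 64_000.0}  # invented values
    print(round(evaluate_widget_expression(expr, sample), 1))  # -> 75.0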

http://git-wip-us.apache.org/repos/asf/ambari/blob/3bf5d32d/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
index a70fad3..deb4ef7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
@@ -1,25 +1,23 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
 -->
-<configuration supports_adding_forbidden="true">
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
   <!-- These configs were inherited from HDP 2.2 -->
   <!-- mapred-env.sh -->
   <property>
@@ -27,21 +25,21 @@
     <display-name>mapred-env template</display-name>
     <description>This is the jinja template for mapred-env.sh file</description>
     <value>
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
 
-export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
+      export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
 
-export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
+      export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
 
-#export HADOOP_JOB_HISTORYSERVER_OPTS=
-#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
-#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
-#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
-#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
-#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
-export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
-export HADOOP_OPTS="-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS"
-export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
+      #export HADOOP_JOB_HISTORYSERVER_OPTS=
+      #export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
+      #export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
+      #export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
+      #export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
+      #export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
+      export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
+      export HADOOP_OPTS="-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS"
+      export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
     </value>
     <value-attributes>
       <type>content</type>
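
The mapred-env template touched above is a Jinja-style template; placeholders such as {{jobhistory_heapsize}} and {{hadoop_java_io_tmpdir}} are filled in when the config is written out as mapred-env.sh. A minimal rendering sketch using the stock jinja2 package follows; Ambari's agent uses its own templating machinery, and the substituted values here are invented.

    # Minimal sketch: render a mapred-env style Jinja template with the
    # standard jinja2 package. Not Ambari's resource_management templating;
    # the substituted values are invented for illustration.
    from jinja2 import Template

    template_source = """
    export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
    export HADOOP_OPTS="-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS"
    """

    rendered = Template(template_source).render(
        jobhistory_heapsize="900",                  # assumed heap size in MB
        hadoop_java_io_tmpdir="/tmp/hadoop/mapred", # assumed tmp dir
    )
    print(rendered)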

http://git-wip-us.apache.org/repos/asf/ambari/blob/3bf5d32d/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
index cef2b14..46f1c32 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
@@ -18,6 +18,16 @@
 -->
 <!-- Put site-specific property overrides in this file. -->
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <property>
+    <name>mapreduce.application.classpath</name>
+    <value>$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure</value>
+    <description>
+      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
+      entries.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
   <!-- These configs were inherited from HDP 2.2 -->
   <property>
     <name>mapreduce.admin.user.env</name>
@@ -30,15 +40,6 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>mapreduce.application.classpath</name>
-    <value>$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
     <name>mapreduce.application.framework.path</name>
     <value>/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
     <description/>
@@ -74,61 +75,4 @@
     <description/>
     <on-ambari-upgrade add="true"/>
   </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
-    <value>1</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
-    <value>1000</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
-    <value>30000</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.job.emit-timeline-data</name>
-    <value>false</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.bind-host</name>
-    <value>0.0.0.0</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>mapreduce.jobhistory.recovery.enable</name>
-    <value>true</value>
-    <description>Enable the history server to store server state and recover
-      server state upon startup.  If enabled then
-      mapreduce.jobhistory.recovery.store.class must be specified.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.recovery.store.class</name>
-    <value>org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService</value>
-    <description>The HistoryServerStateStoreService class to store history server
-      state for recovery.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.recovery.store.leveldb.path</name>
-    <value>/hadoop/mapreduce/jhs</value>
-    <description>The URI where history server state will be stored if HistoryServerLeveldbSystemStateStoreService
-      is configured as the recovery storage class.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
 </configuration>
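
The mapred-site.xml hunk above relocates the mapreduce.application.classpath property, whose value relies on ${hdp.version} substitution. The snippet below is only a rough illustration of how a Hadoop-style site file can be parsed and such placeholders expanded from a known set of variables; it is not Hadoop's Configuration class, and the hdp.version string and local file path are assumptions.

    # Rough illustration: parse a Hadoop-style *-site.xml and expand ${var}
    # placeholders from a supplied dictionary. Not Hadoop's own Configuration
    # resolution; the hdp.version value and file path are invented.
    import re
    import xml.etree.ElementTree as ET

    def load_site_xml(path, variables):
        props = {}
        for prop in ET.parse(path).getroot().findall("property"):
            name = prop.findtext("name")
            value = prop.findtext("value") or ""
            # Expand ${var} from the supplied variables, leaving unknowns as-is.
            value = re.sub(r"\$\{([^}]+)\}",
                           lambda m: variables.get(m.group(1), m.group(0)),
                           value)
            props[name] = value
        return props

    # Assumes a local copy of the file and an invented version string.
    conf = load_site_xml("mapred-site.xml", {"hdp.version": "3.0.0.0-000"})
    print(conf.get("mapreduce.application.framework.path"))
    # -> /hdp/apps/3.0.0.0-000/mapreduce/mapreduce.tar.gz#mr-framework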

http://git-wip-us.apache.org/repos/asf/ambari/blob/3bf5d32d/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml
deleted file mode 100644
index 4768e46..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,71 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>yarn.scheduler.capacity.resource-calculator</name>
-    <description>
-      The ResourceCalculator implementation to be used to compare Resources in the scheduler.
-      The default i.e. org.apache.hadoop.yarn.util.resource.DefaultResourseCalculator only uses
-      Memory while DominantResourceCalculator uses Dominant-resource to compare multi-dimensional
-      resources such as Memory, CPU etc. A Java ResourceCalculator class name is expected.
-    </description>
-    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-    <display-name>CPU Scheduling</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>org.apache.hadoop.yarn.util.resource.DominantResourceCalculator</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
-    <value>*</value>
-    <description/>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- In HDP 2.3, yarn.scheduler.capacity.root.default-node-label-expression was deleted -->
-
-  <!-- These configs were inherited from HDP 2.5 -->
-  <property>
-    <name>capacity-scheduler</name>
-    <description>Enter key=value (one per line) for all properties of capacity-scheduler.xml</description>
-    <depends-on>
-      <property>
-        <type>hive-interactive-env</type>
-        <name>enable_hive_interactive</name>
-      </property>
-      <property>
-        <type>hive-interactive-env</type>
-        <name>llap_queue_capacity</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
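
The deleted capacity-scheduler.xml above exposes the choice between DefaultResourceCalculator (memory only) and DominantResourceCalculator (dominant share across memory and vcores), as described in the yarn.scheduler.capacity.resource-calculator property. The sketch below illustrates the difference in comparison behaviour in a simplified form; it is not Hadoop's implementation, and the cluster and request sizes are invented.

    # Simplified illustration of the two resource-calculator behaviours named
    # in the deleted capacity-scheduler.xml. Not Hadoop's implementation;
    # cluster and request sizes are invented.
    def default_compare(cluster, a, b):
        # DefaultResourceCalculator: only memory is considered.
        return (a["memory_mb"] > b["memory_mb"]) - (a["memory_mb"] < b["memory_mb"])

    def dominant_compare(cluster, a, b):
        # DominantResourceCalculator: compare by each request's dominant share.
        def dominant_share(r):
            return max(r["memory_mb"] / cluster["memory_mb"],
                       r["vcores"] / cluster["vcores"])
        da, db = dominant_share(a), dominant_share(b)
        return (da > db) - (da < db)

    cluster = {"memory_mb": 100_000, "vcores": 100}   # invented cluster capacity
    req_a = {"memory_mb": 4_096, "vcores": 1}         # memory-heavy request
    req_b = {"memory_mb": 1_024, "vcores": 8}         # cpu-heavy request
    print(default_compare(cluster, req_a, req_b))     # 1: a is "bigger" by memory alone
    print(dominant_compare(cluster, req_a, req_b))    # -1: b's dominant share (8% cpu) wins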