Posted to commits@ambari.apache.org by dm...@apache.org on 2017/01/20 10:19:05 UTC

[03/46] ambari git commit: AMBARI-18739. Perf: Create Rolling and Express Upgrade Packs (dlysnichenko)

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/YARN_widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/YARN_widgets.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/YARN_widgets.json
deleted file mode 100644
index 4b76a17..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/YARN_widgets.json
+++ /dev/null
@@ -1,611 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_yarn_dashboard",
-      "display_name": "Standard YARN Dashboard",
-      "section_name": "YARN_SUMMARY",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Memory Utilization",
-          "description": "Percentage of total memory allocated to containers running in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AllocatedMB",
-              "metric_path": "metrics/yarn/Queue/root/AllocatedMB",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AvailableMB",
-              "metric_path": "metrics/yarn/Queue/root/AvailableMB",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Memory Utilization",
-              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedMB / (yarn.QueueMetrics.Queue=root.AllocatedMB + yarn.QueueMetrics.Queue=root.AvailableMB)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "CPU Utilization",
-          "description": "Percentage of total virtual cores allocated to containers running in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AllocatedVCores",
-              "metric_path": "metrics/yarn/Queue/root/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AvailableVCores",
-              "metric_path": "metrics/yarn/Queue/root/AvailableVCores",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable CPU Utilized across NodeManager",
-              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedVCores / (yarn.QueueMetrics.Queue=root.AllocatedVCores + yarn.QueueMetrics.Queue=root.AvailableVCores)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Container Failures",
-          "description": "Percentage of all containers failing in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
-              "metric_path": "metrics/yarn/ContainersFailed._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
-              "metric_path": "metrics/yarn/ContainersCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
-              "metric_path": "metrics/yarn/ContainersLaunched._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersIniting._sum",
-              "metric_path": "metrics/yarn/ContainersIniting._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
-              "metric_path": "metrics/yarn/ContainersKilled._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersRunning._sum",
-              "metric_path": "metrics/yarn/ContainersRunning._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Container Failures",
-              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting._sum + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning._sum)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "App Failures",
-          "description": "Percentage of all launched applications failing in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsFailed._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsFailed._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsKilled._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsKilled._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
-              "metric_path": "metrics/yarn/Queue/root/AppsPending",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsRunning",
-              "metric_path": "metrics/yarn/Queue/root/AppsRunning",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsSubmitted._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsSubmitted._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsCompleted._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "App Failures",
-              "value": "${(yarn.QueueMetrics.Queue=root.AppsFailed._rate/(yarn.QueueMetrics.Queue=root.AppsFailed._rate + yarn.QueueMetrics.Queue=root.AppsKilled._rate + yarn.QueueMetrics.Queue=root.AppsPending + yarn.QueueMetrics.Queue=root.AppsRunning + yarn.QueueMetrics.Queue=root.AppsSubmitted._rate + yarn.QueueMetrics.Queue=root.AppsCompleted._rate)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Pending Apps",
-          "description": "Count of applications waiting for cluster resources to become available.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
-              "metric_path": "metrics/yarn/Queue/root/AppsPending",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Pending Apps",
-              "value": "${yarn.QueueMetrics.Queue=root.AppsPending}"
-            }
-          ],
-          "properties": {
-            "display_unit": "Apps",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Memory",
-          "description": "Percentage of memory used across all NodeManager hosts.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "mem_total._sum",
-              "metric_path": "metrics/memory/mem_total._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "mem_free._sum",
-              "metric_path": "metrics/memory/mem_free._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Memory utilization",
-              "value": "${((mem_total._sum - mem_free._sum)/mem_total._sum) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Disk",
-          "description": "Sum of disk throughput for all NodeManager hosts.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "read_bps._sum",
-              "metric_path": "metrics/disk/read_bps._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "write_bps._sum",
-              "metric_path": "metrics/disk/write_bps._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Read throughput",
-              "value": "${read_bps._sum/1048576}"
-            },
-            {
-              "name": "Write throughput",
-              "value": "${write_bps._sum/1048576}"
-            }
-          ],
-          "properties": {
-            "display_unit": "Mbps",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Network",
-          "description": "Average of Network utilized across all NodeManager hosts.",
-          "default_section_name": "YARN_SUMMARY",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "pkts_in._avg",
-              "metric_path": "metrics/network/pkts_in._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "pkts_out._avg",
-              "metric_path": "metrics/network/pkts_out._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Packets In",
-              "value": "${pkts_in._avg}"
-            },
-            {
-              "name": "Packets Out",
-              "value": "${pkts_out._avg}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster CPU",
-          "description": "Percentage of CPU utilized across all NodeManager hosts.",
-          "default_section_name": "YARN_SUMMARY",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "cpu_system._sum",
-              "metric_path": "metrics/cpu/cpu_system._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_user._sum",
-              "metric_path": "metrics/cpu/cpu_user._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_nice._sum",
-              "metric_path": "metrics/cpu/cpu_nice._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_idle._sum",
-              "metric_path": "metrics/cpu/cpu_idle._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_wio._sum",
-              "metric_path": "metrics/cpu/cpu_wio._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "CPU utilization",
-              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "%"
-          }
-        }
-      ]
-    },
-    {
-      "layout_name": "default_yarn_heatmap",
-      "display_name": "YARN Heatmaps",
-      "section_name": "YARN_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Total Allocatable RAM Utilized per NodeManager",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedGB",
-              "metric_path": "metrics/yarn/AllocatedGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.AvailableGB",
-              "metric_path": "metrics/yarn/AvailableGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable RAM Utilized per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.AllocatedGB/(yarn.NodeManagerMetrics.AvailableGB + yarn.NodeManagerMetrics.AllocatedGB)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Total Allocatable CPU Utilized per NodeManager",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "metric_path": "metrics/yarn/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.AvailableVCores",
-              "metric_path": "metrics/yarn/AvailableVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable CPU Utilized per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.AllocatedVCores/(yarn.NodeManagerMetrics.AllocatedVCores + yarn.NodeManagerMetrics.AvailableVCores)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Container Failures",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
-              "metric_path": "metrics/yarn/ContainersFailed._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
-              "metric_path": "metrics/yarn/ContainersCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
-              "metric_path": "metrics/yarn/ContainersLaunched._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersIniting",
-              "metric_path": "metrics/yarn/ContainersIniting",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
-              "metric_path": "metrics/yarn/ContainersKilled._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersRunning",
-              "metric_path": "metrics/yarn/ContainersRunning",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Container Failures",
-              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager GC Time",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
-              "metric_path": "metrics/jvm/gcTimeMillis",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager Garbage Collection Time",
-              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "max_limit": "10000"
-          }
-        },
-        {
-          "widget_name": "NodeManager JVM Heap Memory Used",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
-              "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager JVM Heap Memory Used",
-              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "512"
-          }
-        },
-        {
-          "widget_name": "Allocated Containers",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedContainers",
-              "metric_path": "metrics/yarn/AllocatedContainers",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Allocated Containers",
-              "value": "${yarn.NodeManagerMetrics.AllocatedContainers}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager RAM Utilized",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedGB",
-              "metric_path": "metrics/yarn/AllocatedGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager RAM Utilized",
-              "value": "${yarn.NodeManagerMetrics.AllocatedGB}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager CPU Utilized",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "metric_path": "metrics/yarn/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager CPU Utilized",
-              "value": "${yarn.NodeManagerMetrics.AllocatedVCores}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}
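
As a rough illustration (not part of the commit itself), the ${...} widget expressions above combine Ambari Metrics series arithmetically; for example, Memory Utilization is AllocatedMB / (AllocatedMB + AvailableMB) * 100. A minimal Python sketch of that calculation, with invented metric values:

    # Sketch of the "Memory Utilization" widget expression
    # ${(AllocatedMB / (AllocatedMB + AvailableMB)) * 100}: two
    # ResourceManager queue metrics combined into one percentage.
    def memory_utilization_pct(allocated_mb, available_mb):
        total = allocated_mb + available_mb
        if total == 0:  # no capacity reported yet
            return 0.0
        return (allocated_mb / float(total)) * 100.0

    # Invented sample: 96 GB allocated out of 256 GB total capacity.
    print(memory_utilization_pct(98304, 163840))  # -> 37.5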

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/alerts.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/alerts.json
deleted file mode 100644
index 67cf881..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/alerts.json
+++ /dev/null
@@ -1,77 +0,0 @@
-{
-  "MAPREDUCE2": {
-    "HISTORYSERVER": [
-      {
-        "name": "mapreduce_history_process",
-        "label": "History Server process",
-        "description": "Alert for history server process status",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "PERF/1.0/services/YARN/package/alerts/alert_history_process.py",
-          "parameters": []
-        }
-      }
-    ]
-  },
-  "YARN": {
-
-    "NODEMANAGER": [
-      {
-        "name": "yarn_nodemanager_health",
-        "label": "NodeManager Health",
-        "description": "This host-level alert checks the node health property available from the NodeManager component.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "PERF/1.0/services/YARN/package/alerts/alert_nodemanager_health.py",
-          "parameters": [
-            {
-              "name": "connection.timeout",
-              "display_name": "Connection Timeout",
-              "value": 5.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before this alert is considered to be CRITICAL",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            }
-          ]
-        }
-      }
-    ],
-    "RESOURCEMANAGER": [
-      {
-        "name": "yarn_resourcemanager_process",
-        "label": "ResourceManager process",
-        "description": "Alert for resourcemanager process status",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "PERF/1.0/services/YARN/package/alerts/alert_resourcemanager_process.py",
-          "parameters": []
-        }
-      }
-    ],
-    "APP_TIMELINE_SERVER": [
-      {
-        "name": "yarn_app_timeline_server_process",
-        "label": "App Timeline process",
-        "description": "Alert for app timeline server process status",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "PERF/1.0/services/YARN/package/alerts/alert_timeline_process.py",
-          "parameters": []
-        }
-      }
-    ]
-  }
-}
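
The alert definitions above are SCRIPT alerts, so each one points at a Python file under PERF/1.0/services/YARN/package/alerts/. As a hedged sketch only (the actual PERF scripts may differ), an Ambari script alert typically exposes get_tokens() and execute() and returns a (result_code, [text]) tuple:

    # Hypothetical outline of a script alert such as
    # alert_resourcemanager_process.py; the real PERF implementation may differ.
    RESULT_CODE_OK = 'OK'
    RESULT_CODE_CRITICAL = 'CRITICAL'

    def get_tokens():
        """Configuration keys this alert needs; none for a plain process check."""
        return ()

    def execute(configurations={}, parameters=[], host_name=None):
        # A real check would probe the process PID file or an HTTP endpoint here.
        process_is_up = True  # placeholder for the actual probe
        if process_is_up:
            return (RESULT_CODE_OK, ['ResourceManager process is running'])
        return (RESULT_CODE_CRITICAL, ['ResourceManager process is not running'])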

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration-mapred/mapred-env.xml
deleted file mode 100644
index fbd056d..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration-mapred/mapred-env.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <!-- mapred-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>mapred-env template</display-name>
-    <description>This is the jinja template for mapred-env.sh file</description>
-    <value>
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-
-export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
-
-export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
-
-#export HADOOP_JOB_HISTORYSERVER_OPTS=
-#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
-#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
-#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
-#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
-#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
-export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
-export HADOOP_OPTS="-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS"
-export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
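
The content property above is a Jinja2 template that the Ambari agent renders with cluster-specific values (for example {{jobhistory_heapsize}}) before writing mapred-env.sh. A small sketch of that substitution, assuming the jinja2 package is available; the variable values are made up:

    # Sketch only: render a fragment of the mapred-env template the way the
    # agent substitutes configuration values; the values below are invented.
    from jinja2 import Template

    fragment = Template(
        "export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n"
        "export HADOOP_OPTS=\"-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS\"\n"
    )
    print(fragment.render(jobhistory_heapsize="900",
                          hadoop_java_io_tmpdir="/tmp/hadoop/yarn"))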

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration-mapred/mapred-site.xml
deleted file mode 100644
index fd03b4f..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration-mapred/mapred-site.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
-  <!-- These properties exist in HDP 2.2 and higher. -->
-  <property>
-    <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64</value>
-    <description>
-      Additional execution environment entries for map and reduce task processes.
-      This is not an additive property. You must preserve the original value if
-      you want your map and reduce tasks to have access to native libraries (compression, etc)
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.application.framework.path</name>
-    <value>/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
-    <description/>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.admin-command-opts</name>
-    <value>-Dhdp.version=${hdp.version}</value>
-    <description>
-      Java opts for the MR App Master processes.
-      The following symbol, if present, will be interpolated: @taskid@ is replaced
-      by current TaskID. Any other occurrences of '@' will go unchanged.
-      For example, to enable verbose gc logging to a file named for the taskid in
-      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.admin.map.child.java.opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
-    <description/>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.admin.reduce.child.java.opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
-    <description/>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
-    <value>1</value>
-    <description/>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
-    <value>1000</value>
-    <description/>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
-    <value>30000</value>
-    <description/>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.job.emit-timeline-data</name>
-    <value>false</value>
-    <description/>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.bind-host</name>
-    <value>0.0.0.0</value>
-    <description/>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These properties exist in HDP 2.3 and higher. -->
-  <property>
-    <name>mapreduce.jobhistory.recovery.enable</name>
-    <value>true</value>
-    <description>Enable the history server to store server state and recover
-      server state upon startup.  If enabled then
-      mapreduce.jobhistory.recovery.store.class must be specified.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.recovery.store.class</name>
-    <value>org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService</value>
-    <description>The HistoryServerStateStoreService class to store history server
-      state for recovery.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.recovery.store.leveldb.path</name>
-    <value>/hadoop/mapreduce/jhs</value>
-    <description>The URI where history server state will be stored if HistoryServerLeveldbStateStoreService
-      is configured as the recovery storage class.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/capacity-scheduler.xml
deleted file mode 100644
index 473a7b9..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,69 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <!-- These properties exist in HDP 2.2 and higher. -->
-  <property>
-    <name>yarn.scheduler.capacity.resource-calculator</name>
-    <description>
-      The ResourceCalculator implementation to be used to compare Resources in the scheduler.
-      The default, org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator, only uses
-      memory, while DominantResourceCalculator uses the dominant resource to compare multi-dimensional
-      resources such as memory and CPU. A Java ResourceCalculator class name is expected.
-    </description>
-    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-    <display-name>CPU Scheduling</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>org.apache.hadoop.yarn.util.resource.DominantResourceCalculator</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
-    <value>*</value>
-    <description/>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These properties exist in HDP 2.5 and higher. -->
-  <property>
-    <name>capacity-scheduler</name>
-    <description>Enter key=value (one per line) for all properties of capacity-scheduler.xml</description>
-    <depends-on>
-      <property>
-        <type>hive-interactive-env</type>
-        <name>enable_hive_interactive</name>
-      </property>
-      <property>
-        <type>hive-interactive-env</type>
-        <name>llap_queue_capacity</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
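
The yarn.scheduler.capacity.resource-calculator choice above determines how the scheduler compares resource requests: DefaultResourceCalculator looks at memory only, while DominantResourceCalculator compares by the dominant share across memory and vcores. A small illustration of the difference (not YARN code; the cluster and request numbers are invented):

    # DefaultResourceCalculator: compare by memory alone.
    def default_share(req_mb, cluster_mb, req_vcores, cluster_vcores):
        return req_mb / float(cluster_mb)

    # DominantResourceCalculator: compare by whichever resource dominates.
    def dominant_share(req_mb, cluster_mb, req_vcores, cluster_vcores):
        return max(req_mb / float(cluster_mb),
                   req_vcores / float(cluster_vcores))

    # A CPU-heavy container: 2 GB RAM, 8 vcores on a 256 GB / 64 vcore cluster.
    print(default_share(2048, 262144, 8, 64))   # ~0.0078, memory looks tiny
    print(dominant_share(2048, 262144, 8, 64))  # 0.125, CPU dominates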

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-audit.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-audit.xml
deleted file mode 100644
index 0352424..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-audit.xml
+++ /dev/null
@@ -1,121 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>xasecure.audit.is.enabled</name>
-    <value>true</value>
-    <description>Is Audit enabled?</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs</name>
-    <value>true</value>
-    <display-name>Audit to HDFS</display-name>
-    <description>Is Audit to HDFS enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.dir</name>
-    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
-    <description>HDFS folder to write audit to; make sure the service user has the required permissions</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs.dir</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
-    <value>/var/log/hadoop/yarn/audit/hdfs/spool</value>
-    <description>/var/log/hadoop/yarn/audit/hdfs/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr</name>
-    <value>false</value>
-    <display-name>Audit to SOLR</display-name>
-    <description>Is Solr audit enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.solr</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.urls</name>
-    <value/>
-    <description>Solr URL</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.urls</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.zookeepers</name>
-    <value>NONE</value>
-    <description>Solr Zookeeper string</description>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.zookeepers</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
-    <value>/var/log/hadoop/yarn/audit/solr/spool</value>
-    <description>/var/log/hadoop/yarn/audit/solr/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.provider.summary.enabled</name>
-    <value>false</value>
-    <display-name>Audit provider summary enabled</display-name>
-    <description>Enable Summary audit?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
deleted file mode 100644
index 97867cc..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
+++ /dev/null
@@ -1,82 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
-  <property>
-    <name>policy_user</name>
-    <value>ambari-qa</value>
-    <display-name>Policy user for YARN</display-name>
-    <description>This user must be a system user and must also exist in the Ranger Admin portal</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value/>
-    <description>Used for repository creation on ranger admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>common.name.for.certificate</name>
-    <value/>
-    <description>Common name for the certificate; this value should match what is specified in the repository within Ranger Admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger-yarn-plugin-enabled</name>
-    <value>No</value>
-    <display-name>Enable Ranger for YARN</display-name>
-    <description>Enable the Ranger YARN plugin?</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>ranger-yarn-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <type>boolean</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_USERNAME</name>
-    <value>yarn</value>
-    <display-name>Ranger repository config user</display-name>
-    <description>Used for repository creation on ranger admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_PASSWORD</name>
-    <value>yarn</value>
-    <display-name>Ranger repository config password</display-name>
-    <property-type>PASSWORD</property-type>
-    <description>Used for repository creation on ranger admin</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
deleted file mode 100644
index 5410104..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore</name>
-    <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks</value>
-    <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.password</name>
-    <value>myKeyFilePassword</value>
-    <property-type>PASSWORD</property-type>
-    <description>password for keystore</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore</name>
-    <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks</value>
-    <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.password</name>
-    <value>changeit</value>
-    <property-type>PASSWORD</property-type>
-    <description>java truststore password</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-security.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-security.xml
deleted file mode 100644
index 5f69962..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/ranger-yarn-security.xml
+++ /dev/null
@@ -1,58 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>ranger.plugin.yarn.service.name</name>
-    <value>{{repo_name}}</value>
-    <description>Name of the Ranger service containing policies for this Yarn instance</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.source.impl</name>
-    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
-    <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.rest.url</name>
-    <value>{{policymgr_mgr_url}}</value>
-    <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.rest.ssl.config.file</name>
-    <value>/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml</value>
-    <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.pollIntervalMs</name>
-    <value>30000</value>
-    <description>How often to poll for changes in policies?</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.cache.dir</name>
-    <value>/etc/ranger/{{repo_name}}/policycache</value>
-    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-alert-config.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-alert-config.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-alert-config.xml
deleted file mode 100644
index 392eea7..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-alert-config.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-        http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
--->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
-
-    <property>
-        <name>alert.behavior.type</name>
-        <value>percentage</value>
-        <description>
-            This property describes the type of alert behavior.
-            There are three types: percentage, timeout, flip.
-        </description>
-        <on-ambari-upgrade add="false"/>
-    </property>
-
-
-    <property>
-        <name>alert.success.percentage</name>
-        <value>100</value>
-        <description>
-            This property takes effect only when alert.behavior.type is
-            set to "percentage". Set it to the percentage of alert checks
-            that should succeed.
-        </description>
-        <on-ambari-upgrade add="false"/>
-    </property>
-
-
-    <property>
-        <name>alert.timeout.return.value</name>
-        <value>false</value>
-        <description>
-            This property takes effect only when alert.behavior.type is
-            set to "timeout". Set it to the result the alert should
-            return after the timeout: false|true|none.
-        </description>
-        <on-ambari-upgrade add="false"/>
-    </property>
-
-    <property>
-        <name>alert.timeout.secs</name>
-        <value>120</value>
-        <description>
-            This property takes effect only when alert.behavior.type is
-            set to "timeout". Set it to the number of seconds the alert
-            should sleep before returning.
-        </description>
-        <on-ambari-upgrade add="false"/>
-    </property>
-
-
-    <property>
-        <name>alert.flip.interval.mins</name>
-        <value>3</value>
-        <description>
-            This property takes effect only when alert.behavior.type is
-            set to "flip". Set it to the number of minutes after which
-            the alert should flip between true and false.
-        </description>
-        <on-ambari-upgrade add="false"/>
-    </property>
-
-
-</configuration>
\ No newline at end of file
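
The yarn-alert-config.xml removed above drives the PERF stack's simulated alerts: alert.behavior.type selects one of three modes (percentage, timeout, flip), and the remaining properties parameterize whichever mode is chosen. The sketch below is a minimal interpretation of those modes, not the stack's actual alert script; the hard-coded config dict stands in for the values Ambari would normally pass to it.

# Minimal sketch of the three alert behaviors described above; the config
# dict is a stand-in for the real yarn-alert-config values.
import random
import time

config = {
    "alert.behavior.type": "percentage",   # percentage | timeout | flip
    "alert.success.percentage": 100,
    "alert.timeout.return.value": "false",
    "alert.timeout.secs": 120,
    "alert.flip.interval.mins": 3,
}

def evaluate_alert(cfg):
    behavior = cfg["alert.behavior.type"]
    if behavior == "percentage":
        # Succeed roughly alert.success.percentage percent of the time.
        return random.uniform(0, 100) < cfg["alert.success.percentage"]
    if behavior == "timeout":
        # Sleep for alert.timeout.secs, then return the configured result.
        time.sleep(cfg["alert.timeout.secs"])
        return cfg["alert.timeout.return.value"] == "true"
    if behavior == "flip":
        # Alternate between OK and CRITICAL every alert.flip.interval.mins minutes.
        period_secs = cfg["alert.flip.interval.mins"] * 60
        return int(time.time() // period_secs) % 2 == 0
    raise ValueError("unknown alert.behavior.type: %s" % behavior)

print("OK" if evaluate_alert(config) else "CRITICAL")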

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-env.xml
deleted file mode 100644
index f81080e..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-env.xml
+++ /dev/null
@@ -1,201 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true" supports_adding_forbidden="true">
-  <!-- These properties exist in HDP 2.1 and higher. -->
-  <property>
-    <name>apptimelineserver_heapsize</name>
-    <value>1024</value>
-    <display-name>AppTimelineServer Java heap size</display-name>
-    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <unit>MB</unit>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These properties exist in HDP 2.2 and higher. -->
-  <property>
-    <name>yarn_cgroups_enabled</name>
-    <value>false</value>
-    <description>You can use CGroups to isolate CPU-heavy processes in a Hadoop cluster.</description>
-    <display-name>CPU Isolation</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These properties exist in HDP 2.3 and higher. -->
-  <property>
-    <name>is_supported_yarn_ranger</name>
-    <value>true</value>
-    <description>Set to false by default,  needs to be set to true in stacks that use Ranger Yarn Plugin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- yarn-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>yarn-env template</display-name>
-    <description>This is the jinja template for yarn-env.sh file</description>
-    <value>
-      export HADOOP_YARN_HOME={{hadoop_yarn_home}}
-      export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
-      export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
-      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-      export JAVA_HOME={{java64_home}}
-      export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
-
-      # We need to add the EWMA appender for the yarn daemons only;
-      # however, YARN_ROOT_LOGGER is shared by the yarn client and the
-      # daemons. This restricts the EWMA appender to daemons only.
-      INVOKER="${0##*/}"
-      if [ "$INVOKER" == "yarn-daemon.sh" ]; then
-        export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}
-      fi
-
-      # User for YARN daemons
-      export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
-
-      # resolve links - $0 may be a softlink
-      export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
-
-      # some Java parameters
-      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-      if [ "$JAVA_HOME" != "" ]; then
-      #echo "run java in $JAVA_HOME"
-      JAVA_HOME=$JAVA_HOME
-      fi
-
-      if [ "$JAVA_HOME" = "" ]; then
-      echo "Error: JAVA_HOME is not set."
-      exit 1
-      fi
-
-      JAVA=$JAVA_HOME/bin/java
-      JAVA_HEAP_MAX=-Xmx1000m
-
-      # For setting YARN specific HEAP sizes please use this
-      # Parameter and set appropriately
-      YARN_HEAPSIZE={{yarn_heapsize}}
-
-      # check envvars which might override default args
-      if [ "$YARN_HEAPSIZE" != "" ]; then
-      JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
-      fi
-
-      # Resource Manager specific parameters
-
-      # Specify the max Heapsize for the ResourceManager using a numerical value
-      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-      # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_RESOURCEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
-
-      # Specify the JVM options to be used when starting the ResourceManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_RESOURCEMANAGER_OPTS=
-
-      # Node Manager specific parameters
-
-      # Specify the max Heapsize for the NodeManager using a numerical value
-      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-      # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_NODEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
-
-      # Specify the max Heapsize for the timeline server using a numerical value
-      # in the scale of MB. For example, to specify a JVM option of -Xmx1024m, set
-      # the value to 1024.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_TIMELINESERVER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
-
-      # Specify the JVM options to be used when starting the NodeManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_NODEMANAGER_OPTS=
-
-      # so that filenames w/ spaces are handled correctly in loops below
-      IFS=
-
-
-      # default log directory and file
-      if [ "$YARN_LOG_DIR" = "" ]; then
-      YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
-      fi
-      if [ "$YARN_LOGFILE" = "" ]; then
-      YARN_LOGFILE='yarn.log'
-      fi
-
-      # default policy file for service-level authorization
-      if [ "$YARN_POLICYFILE" = "" ]; then
-      YARN_POLICYFILE="hadoop-policy.xml"
-      fi
-
-      # restore ordinary behaviour
-      unset IFS
-
-
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
-      YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
-      export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
-      if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-      YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-      fi
-      YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
-      YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
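
Most of the yarn-env.xml removed above is a jinja template for yarn-env.sh whose {{...}} placeholders (hadoop_yarn_home, yarn_log_dir_prefix, the three heapsize properties, and so on) are substituted at deploy time. The stand-alone jinja2 sketch below only demonstrates that substitution for a few placeholders; the concrete paths and sizes are illustrative assumptions, and Ambari itself renders the template through its own machinery rather than this code.

# Stand-alone substitution demo; values are illustrative assumptions, not
# the cluster's real configuration.
from jinja2 import Template

yarn_env_template = (
    "export HADOOP_YARN_HOME={{hadoop_yarn_home}}\n"
    "export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\n"
    "export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n"
    "export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n"
    "export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n"
)

rendered = Template(yarn_env_template).render(
    hadoop_yarn_home="/usr/hdp/current/hadoop-yarn-client",
    yarn_log_dir_prefix="/var/log/hadoop-yarn",
    resourcemanager_heapsize=1024,
    nodemanager_heapsize=1024,
    apptimelineserver_heapsize=1024,   # matches the default declared above
)
print(rendered)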

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-log4j.xml
deleted file mode 100644
index 5e9ac98..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-log4j.xml
+++ /dev/null
@@ -1,103 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>content</name>
-    <display-name>yarn-log4j template</display-name>
-    <description>Custom log4j.properties</description>
-    <value>
-#Relative to Yarn Log Dir Prefix
-yarn.log.dir=.
-#
-# Job Summary Appender
-#
-# Use following logger to send summary to separate file defined by
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-#
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-# Set the ResourceManager summary log filename
-yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-# Set the ResourceManager summary log level and appender
-yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-
-# To enable AppSummaryLogging for the RM,
-# set yarn.server.resourcemanager.appsummary.logger to
-# LEVEL,RMSUMMARY in hadoop-env.sh
-
-# Appender for ResourceManager Application Summary Log
-# Requires the following properties to be set
-#    - hadoop.log.dir (Hadoop Log directory)
-#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
-log4j.appender.RMSUMMARY.MaxFileSize=256MB
-log4j.appender.RMSUMMARY.MaxBackupIndex=20
-log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-
-# Appender for viewing information for errors and warnings
-yarn.ewma.cleanupInterval=300
-yarn.ewma.messageAgeLimitSeconds=86400
-yarn.ewma.maxUniqueMessages=250
-log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
-log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
-log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
-log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
-
-# Audit logging for ResourceManager
-rm.audit.logger=${hadoop.root.logger}
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
-log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
-log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
-
-# Audit logging for NodeManager
-nm.audit.logger=${hadoop.root.logger}
-log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
-log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
-log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
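
Like yarn-env, the yarn-log4j property above is a single content blob that ends up on disk verbatim as log4j.properties. Below is a minimal sketch of that last step, not Ambari's own code; the configuration dict and target path are illustrative placeholders.

# Minimal sketch: write the "content" blob out as log4j.properties.
# The dict and path below are placeholders, not real deployment values.
import os

configurations = {
    "yarn-log4j": {
        "content": "# ... the log4j template text shown above ..."
    }
}

hadoop_conf_dir = "/etc/hadoop/conf"   # illustrative path

log4j_text = configurations["yarn-log4j"]["content"]
os.makedirs(hadoop_conf_dir, exist_ok=True)
with open(os.path.join(hadoop_conf_dir, "log4j.properties"), "w") as f:
    f.write(log4j_text)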