Posted to commits@ambari.apache.org by jo...@apache.org on 2014/11/11 21:43:04 UTC

[01/17] ambari git commit: AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)

Repository: ambari
Updated Branches:
  refs/heads/trunk 0bf66729c -> e4ededeb6


http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php
----------------------------------------------------------------------
diff --git a/contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php b/contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php
deleted file mode 100644
index 067b7ff..0000000
--- a/contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php
+++ /dev/null
@@ -1,513 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Constants. */
-define("HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES", "Properties");
-define("HDP_MON_RESPONSE_OPTION_KEY__TYPE", "Type");
-
-define("HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE", "Uncacheable");
-define("HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON", "JSON");
-define("HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT", "JAVASCRIPT");
-
-define("HDP_MON_QUERY_ARG__JSONP", "jsonp");
-
-/** Spits out appropriate response headers, as per the options passed in. */
-function hdp_mon_generate_response_headers( $response_options )
-{
-  if( $response_options[HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES] == HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE )
-  {
-    // Make the response uncacheable.
-    header("Expires: Mon, 26 Jul 1997 05:00:00 GMT"); // Date in the past
-    header("Last-Modified: " . gmdate("D, d M Y H:i:s") . " GMT"); // Always modified
-    header("Cache-Control: no-cache, must-revalidate"); // HTTP/1.1
-    header("Pragma: no-cache"); // HTTP/1.0
-  }
-
-  switch( $response_options[HDP_MON_RESPONSE_OPTION_KEY__TYPE] )
-  {
-    case HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON:
-      {
-        header('Content-type: application/json');
-      }
-      break;
-
-    case HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT:
-      {
-        header('Content-type: application/javascript');
-      }
-      break;
-  }
-}
-
-/** Given $response_data (which we expect to be a JSON string), generate an
- *  HTTP response, which includes emitting the necessary HTTP response headers
- *  followed by the response body (that is either plain ol' $response_data,
- *  or a JSONP wrapper around it).
- */
-function hdp_mon_generate_response( $response_data )
-{
-  $jsonpFunctionName = NULL;
-  if (isset($_GET[HDP_MON_QUERY_ARG__JSONP])) {
-    $jsonpFunctionName = $_GET[HDP_MON_QUERY_ARG__JSONP];
-  }
-
-  hdp_mon_generate_response_headers( array
-  ( HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES => HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE,
-  HDP_MON_RESPONSE_OPTION_KEY__TYPE =>
-  isset( $jsonpFunctionName )  && $jsonpFunctionName != "" ?
-  HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT :
-  HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON ) );
-
-  if( isset( $jsonpFunctionName ) )
-  {
-    echo "$jsonpFunctionName( $response_data );";
-  }
-  else
-  {
-    echo $response_data;
-  }
-}
-
-  /* alert_type { all, nok, ok, warn, critical } */
-  define ("all", "-2");
-  define ("nok", "-1");
-  define ("ok", "0");
-  define ("warn", "1");
-  define ("critical", "2");
-
-  define ("HDFS_SERVICE_CHECK", "NAMENODE::NameNode process down");
-  define ("MAPREDUCE_SERVICE_CHECK", "JOBTRACKER::JobTracker process down");
-  define ("HBASE_SERVICE_CHECK", "HBASEMASTER::HBaseMaster process down");
-  define ("ZOOKEEPER_SERVICE_CHECK", "ZOOKEEPER::Percent ZooKeeper Servers down");
-  define ("HIVE_SERVICE_CHECK", "HIVE-METASTORE::Hive Metastore status check");
-  define ("OOZIE_SERVICE_CHECK", "OOZIE::Oozie Server status check");
-  define ("WEBHCAT_SERVICE_CHECK", "WEBHCAT::WebHCat Server status check");
-  define ("PUPPET_SERVICE_CHECK", "PUPPET::Puppet agent down");
-
-  // on SUSE, some versions of Nagios stored data in /var/lib
-  $status_file = "/var/nagios/status.dat";
-  if (!file_exists($status_file) && file_exists("/etc/SuSE-release")) {
-    $status_file = "/var/lib/nagios/status.dat";
-  }
-  
-  $q1="";
-  if (array_key_exists('q1', $_GET)) {
-    $q1=$_GET["q1"];
-  }
-  $q2="";
-  if (array_key_exists('q2', $_GET)) {
-    $q2=$_GET["q2"];
-  }
-  $alert_type="";
-  if (array_key_exists('alert_type', $_GET)) {
-    $alert_type=$_GET["alert_type"];
-  }
-  $host="";
-  if (array_key_exists('host_name', $_GET)) {
-    $host=$_GET["host_name"];
-  }
-  $indent="";
-  if (array_key_exists('indent', $_GET)) {
-    $indent=$_GET["indent"];
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  if ($q1 == "alerts") {
-    /* Add the service status object to result array */
-    $result['alerts'] = query_alerts ($status_file_content, $alert_type, $host);
-  }
-
-  if ($q2 == "hosts") {
-    /* Add the service status object to result array */
-    $result['hosts'] = query_hosts ($status_file_content, $alert_type, $host);
-  }
-
-  /* Add host count object to the results */
-  $result['hostcounts'] = query_host_count ($status_file_content);
-
-  /* Add services runtime states */
-  $result['servicestates'] = query_service_states ($status_file_content);
-
-  /* Return results */
-  if ($indent == "true") {
-    hdp_mon_generate_response(indent(json_encode($result)));
-  } else {
-    hdp_mon_generate_response(json_encode($result));
-  }
-
-  # Functions
-  /* Query service states */
-  function query_service_states ($status_file_content) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $services_object = array ();
-    $services_object["PUPPET"] = 0;
-    foreach ($matches[0] as $object) {
-
-      if (getParameter($object, "service_description") == HDFS_SERVICE_CHECK) {
-        $services_object["HDFS"] = getParameter($object, "last_hard_state");
-        if ($services_object["HDFS"] >= 1) {
-          $services_object["HDFS"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == MAPREDUCE_SERVICE_CHECK) {
-        $services_object["MAPREDUCE"] = getParameter($object, "last_hard_state");
-        if ($services_object["MAPREDUCE"] >= 1) {
-          $services_object["MAPREDUCE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == HBASE_SERVICE_CHECK) {
-        $services_object["HBASE"] = getParameter($object, "last_hard_state");
-        if ($services_object["HBASE"] >= 1) {
-          $services_object["HBASE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == HIVE_SERVICE_CHECK) {
-        $services_object["HIVE"] = getParameter($object, "last_hard_state");
-        if ($services_object["HIVE"] >= 1) {
-          $services_object["HIVE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == OOZIE_SERVICE_CHECK) {
-        $services_object["OOZIE"] = getParameter($object, "last_hard_state");
-        if ($services_object["OOZIE"] >= 1) {
-          $services_object["OOZIE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == WEBHCAT_SERVICE_CHECK) {
-          $services_object["HIVE"] = getParameter($object, "last_hard_state");
-        if ($services_object["HIVE"] >= 1) {
-            $services_object["HIVE"] = 1;
-        }
-        continue;
-      }
-      /* In the case of ZooKeeper, the service is treated as running if the
-       * alert is OK or WARNING (i.e. partial instances of ZooKeeper are
-       * running).
-       */
-      if (getParameter($object, "service_description") == ZOOKEEPER_SERVICE_CHECK) {
-        $services_object["ZOOKEEPER"] = getParameter($object, "last_hard_state");
-        if ($services_object["ZOOKEEPER"] <= 1) {
-          $services_object["ZOOKEEPER"] = 0;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == PUPPET_SERVICE_CHECK) {
-        $state = getParameter($object, "last_hard_state");
-        if ($state >= 1) {
-          $services_object["PUPPET"]++;
-        }
-        continue;
-      }
-    }
-    if ($services_object["PUPPET"] >= 1) {
-      $services_object["PUPPET"] = 1;
-    }
-    $services_object = array_map('strval', $services_object);
-    return $services_object;
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $up_hosts = 0;
-    $down_hosts = 0;
-
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "last_hard_state") != ok) {
-        $down_hosts++;
-      } else {
-        $up_hosts++;
-      }
-    }
-    $hostcounts_object['up_hosts'] = $up_hosts;
-    $hostcounts_object['down_hosts'] = $down_hosts;
-    $hostcounts_object = array_map('strval', $hostcounts_object);
-    return $hostcounts_object;
-  }
-
-  /* Query Hosts */
-  function query_hosts ($status_file_content, $alert_type, $host) {
-    $hoststatus_attributes = array ("host_name", "current_state", "last_hard_state",
-                              "plugin_output", "last_check", "current_attempt",
-                              "last_hard_state_change", "last_time_up", "last_time_down",
-                              "last_time_unreachable", "is_flapping", "last_check");
-
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hosts_objects = array ();
-    $i = 0;
-    foreach ($matches[0] as $object) {
-      $hoststatus = array ();
-      $chost = getParameter($object, "host_name");
-      if (empty($host) || $chost == $host) {
-        foreach ($hoststatus_attributes as $attrib) {
-          $hoststatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
-        }
-        $hoststatus['alerts'] = query_alerts ($status_file_content, $alert_type, $chost);
-        if (!empty($host)) {
-          $hosts_objects[$i] = $hoststatus;
-          $i++;
-          break;
-        }
-      }
-      if (!empty($hoststatus)) {
-        $hosts_objects[$i] = $hoststatus;
-        $i++;
-      }
-    }
-    /* echo "COUNT : " . count ($hosts_objects) . "\n"; */
-    return $hosts_objects;
-  }
-
-  /* Query Alerts */
-  function query_alerts ($status_file_content, $alert_type, $host) {
-
-    $servicestatus_attributes = array ("service_description", "host_name", "current_attempt",
-                                       "current_state", "plugin_output", "last_hard_state_change", "last_hard_state",
-                                       "last_time_ok", "last_time_warning", "last_time_unknown",
-                                       "last_time_critical", "is_flapping", "last_check",
-                                       "long_plugin_output");
-
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    #echo $matches[0][0] . ", " . $matches[0][1] . "\n";
-    #echo $matches[1][0] . ", " . $matches[1][1] . "\n";
-    $services_objects = array ();
-    $i = 0;
-    foreach ($matches[1] as $object) {      
-      $servicestatus = getParameterMap($object, $servicestatus_attributes);
-      switch ($alert_type) {
-      case "all":
-        if (empty($host) || $servicestatus['host_name'] == $host) {
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "nok":
-        if (getParameterMapValue($servicestatus, "last_hard_state") != ok &&
-           (empty($host) || getParameterMapValue($servicestatus, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameterMapValue($servicestatus, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "ok":
-        if (getParameterMapValue($servicestatus, "last_hard_state") == ok &&
-           (empty($host) || getParameterMapValue($servicestatus, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameterMapValue($servicestatus, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "warn":
-        if (getParameterMapValue($servicestatus, "last_hard_state") == warn &&
-           (empty($host) || getParameterMapValue($servicestatus, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameterMapValue($servicestatus, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "critical":
-        if (getParameterMapValue($servicestatus, "last_hard_state") == critical &&
-           (empty($host) || getParameterMapValue($servicestatus, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameterMapValue($servicestatus, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      }
-      
-      if (!empty($servicestatus)) {
-        $services_objects[$i] = $servicestatus;
-        $i++;
-      }
-    }
-
-    // echo "COUNT : " . count ($services_objects) . "\n";
-    return $services_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "DATANODE":
-      case "NAMENODE":
-      case "JOURNALNODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-      case "TASKTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-      case "REGIONSERVER":
-        $pieces[0] = "HBASE";
-        break;
-      case "HIVE-METASTORE":
-      case "HIVE-SERVER":
-      case "WEBHCAT":
-        $pieces[0] = "HIVE";
-        break;
-      case "ZKSERVERS":
-        $pieces[0] = "ZOOKEEPER";
-        break;
-      case "AMBARI":
-        $pieces[0] = "AMBARI";
-        break;
-      case "FLUME":
-        $pieces[0] = "FLUME";
-        break;
-      case "JOBHISTORY":
-        $pieces[0] = "MAPREDUCE2";
-        break;
-      case "RESOURCEMANAGER":
-      case "APP_TIMELINE_SERVER":
-      case "NODEMANAGER":
-        $pieces[0] = "YARN";
-        break;
-      case "STORM_UI_SERVER":
-      case "NIMBUS":
-      case "DRPC_SERVER":
-      case "SUPERVISOR":
-      case "STORM_REST_API":
-        $pieces[0] = "STORM";
-        break;
-      case "NAGIOS":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-      case "ZOOKEEPER":
-      case "OOZIE":
-      case "GANGLIA":
-      case "STORM":
-      case "FALCON":
-      case "PUPPET":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-  function getParameterMapValue($map, $key) {
-    $value = $map[$key];
-
-    if (!is_null($value))
-      return "" . $value;
-
-    return "";
-  }
-
-
-  function getParameterMap($object, $keynames) {
-
-    $cnt = preg_match_all('/\t([\S]*)=[\n]?[\t]?([\S= ]*)/', $object, $matches, PREG_PATTERN_ORDER);
-
-    $tmpmap = array_combine($matches[1], $matches[2]);
-
-    $map = array();
-    foreach ($keynames as $key) {
-      $map[$key] = htmlentities($tmpmap[$key], ENT_COMPAT);
-    }
-
-    return $map;
-  }
-  
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-?>
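
For context, the script deleted above worked by reading Nagios's status.dat,
extracting each servicestatus { ... } block with a non-greedy regex, and then
pulling individual fields out of a block with a per-key pattern; it was driven
by query parameters (for example, nagios_alerts.php?q1=alerts&q2=hosts&alert_type=all&indent=true,
using the parameter names from the code above). A minimal Java sketch of the
same block-extraction technique -- purely illustrative, not part of this
commit:

    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class StatusDatReader {
      // Mirrors the PHP regex /servicestatus \{([\S\s]*?)\}/ -- the
      // non-greedy quantifier keeps each block separate.
      private static final Pattern BLOCK =
          Pattern.compile("servicestatus \\{([\\S\\s]*?)\\}");

      /** Returns the body of every servicestatus block in the file content. */
      static List<String> serviceStatusBlocks(String statusFileContent) {
        List<String> blocks = new ArrayList<String>();
        Matcher m = BLOCK.matcher(statusFileContent);
        while (m.find()) {
          blocks.add(m.group(1));
        }
        return blocks;
      }

      /** Reads one key=value field from a block, as getParameter() did. */
      static String getParameter(String block, String key) {
        Matcher m = Pattern.compile(
            "\\s" + Pattern.quote(key) + "[\\s= ]*([\\S, ]*)\\n").matcher(block);
        return m.find() ? m.group(1) : "";
      }

      public static void main(String[] args) throws Exception {
        String content = new String(
            Files.readAllBytes(Paths.get("/var/nagios/status.dat")));
        for (String block : serviceStatusBlocks(content)) {
          System.out.println(getParameter(block, "service_description")
              + " -> " + getParameter(block, "last_hard_state"));
        }
      }
    }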


[06/17] ambari git commit: AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 4d08d6f..6ab35c2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -38,9 +38,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.util.Modules;
+import javax.persistence.EntityManager;
+import javax.xml.bind.JAXBException;
+
 import junit.framework.Assert;
 
 import org.apache.ambari.server.AmbariException;
@@ -70,8 +70,8 @@ import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.alert.AlertDefinition;
-import org.apache.ambari.server.state.alert.MetricSource;
 import org.apache.ambari.server.state.alert.AlertDefinitionFactory;
+import org.apache.ambari.server.state.alert.MetricSource;
 import org.apache.ambari.server.state.alert.PortSource;
 import org.apache.ambari.server.state.alert.Reporting;
 import org.apache.ambari.server.state.alert.Source;
@@ -86,9 +86,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.inject.AbstractModule;
-
-import javax.persistence.EntityManager;
-import javax.xml.bind.JAXBException;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.util.Modules;
 
 public class AmbariMetaInfoTest {
 
@@ -973,21 +973,6 @@ public class AmbariMetaInfoTest {
   }
 
   @Test
-  public void testNagios134Dependencies() throws Exception {
-    ServiceInfo service = metaInfo.getService(STACK_NAME_HDP, "1.3.4", "NAGIOS");
-    List<ComponentInfo> componentList = service.getComponents();
-    Assert.assertEquals(1, componentList.size());
-    ComponentInfo component = componentList.get(0);
-    Assert.assertEquals("NAGIOS_SERVER", component.getName());
-    // dependencies
-    Assert.assertEquals(0, component.getDependencies().size());
-    // component auto deploy
-    Assert.assertNull(component.getAutoDeploy());
-    // cardinality
-    Assert.assertEquals("1", component.getCardinality());
-  }
-
-  @Test
   public void testOozie134Dependencies() throws Exception {
     ServiceInfo service = metaInfo.getService(STACK_NAME_HDP, "1.3.4", "OOZIE");
     List<ComponentInfo> componentList = service.getComponents();

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java
index d319fab..58bc47e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java
@@ -18,23 +18,23 @@
 
 package org.apache.ambari.server.configuration;
 
+import java.util.Properties;
+
 import org.junit.Assert;
 import org.junit.Test;
 
-import java.util.Properties;
-
 /**
  * ComponentSSLConfiguration tests.
  */
 public class ComponentSSLConfigurationTest {
 
-  public static ComponentSSLConfiguration getConfiguration(String path, String pass, String type, boolean gangliaSSL, boolean nagiosSSL) {
+  public static ComponentSSLConfiguration getConfiguration(String path,
+      String pass, String type, boolean gangliaSSL) {
     Properties ambariProperties = new Properties();
     ambariProperties.setProperty(Configuration.SSL_TRUSTSTORE_PATH_KEY, path);
     ambariProperties.setProperty(Configuration.SSL_TRUSTSTORE_PASSWORD_KEY, pass);
     ambariProperties.setProperty(Configuration.SSL_TRUSTSTORE_TYPE_KEY, type);
     ambariProperties.setProperty(Configuration.GANGLIA_HTTPS_KEY, Boolean.toString(gangliaSSL));
-    ambariProperties.setProperty(Configuration.NAGIOS_HTTPS_KEY, Boolean.toString(nagiosSSL));
 
     Configuration configuration =  new TestConfiguration(ambariProperties);
 
@@ -47,34 +47,32 @@ public class ComponentSSLConfigurationTest {
 
   @Test
   public void testGetTruststorePath() throws Exception {
-    ComponentSSLConfiguration sslConfiguration = getConfiguration("tspath", "tspass", "tstype", true, false);
+    ComponentSSLConfiguration sslConfiguration = getConfiguration("tspath",
+        "tspass", "tstype", true);
     Assert.assertEquals("tspath", sslConfiguration.getTruststorePath());
   }
 
   @Test
   public void testGetTruststorePassword() throws Exception {
-    ComponentSSLConfiguration sslConfiguration = getConfiguration("tspath", "tspass", "tstype", true, false);
+    ComponentSSLConfiguration sslConfiguration = getConfiguration("tspath",
+        "tspass", "tstype", true);
     Assert.assertEquals("tspass", sslConfiguration.getTruststorePassword());
   }
 
   @Test
   public void testGetTruststoreType() throws Exception {
-    ComponentSSLConfiguration sslConfiguration = getConfiguration("tspath", "tspass", "tstype", true, false);
+    ComponentSSLConfiguration sslConfiguration = getConfiguration("tspath",
+        "tspass", "tstype", true);
     Assert.assertEquals("tstype", sslConfiguration.getTruststoreType());
   }
 
   @Test
   public void testIsGangliaSSL() throws Exception {
-    ComponentSSLConfiguration sslConfiguration = getConfiguration("tspath", "tspass", "tstype", true, false);
+    ComponentSSLConfiguration sslConfiguration = getConfiguration("tspath",
+        "tspass", "tstype", true);
     Assert.assertTrue(sslConfiguration.isGangliaSSL());
   }
 
-  @Test
-  public void testIsNagiosSSL() throws Exception {
-    ComponentSSLConfiguration sslConfiguration = getConfiguration("tspath", "tspass", "tstype", true, false);
-    Assert.assertFalse(sslConfiguration.isNagiosSSL());
-  }
-
   private static class TestConfiguration extends Configuration {
 
     private TestConfiguration(Properties properties) {

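With the nagiosSSL flag gone, the test helper now builds an SSL configuration
from four arguments. A hypothetical caller under the new signature (mirroring
the updated tests above):

    ComponentSSLConfiguration ssl = ComponentSSLConfigurationTest.getConfiguration(
        "tspath", "tspass", "tstype", true);
    Assert.assertTrue(ssl.isGangliaSSL());

This is the same shape the Ganglia property provider tests below are migrated to.
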
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 2a0ebc5..e1e25e0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -153,7 +153,7 @@ public class AmbariManagementControllerTest {
   private static final String REPO_ID = "HDP-1.1.1.16";
   private static final String PROPERTY_NAME = "hbase.regionserver.msginterval";
   private static final String SERVICE_NAME = "HDFS";
-  private static final String NAGIOS_SERVICE_NAME = "NAGIOS";
+  private static final String FAKE_SERVICE_NAME = "FAKENAGIOS";
   private static final int STACK_VERSIONS_CNT = 12;
   private static final int REPOS_CNT = 3;
   private static final int STACKS_CNT = 3;
@@ -4071,102 +4071,6 @@ public class AmbariManagementControllerTest {
     Assert.assertNull(hrc.getExecutionCommandWrapper().getExecutionCommand().getPassiveInfo());
   }
 
-  @Test
-  public void testPassiveSentWithNagiosRestart() throws AmbariException {
-    setupClusterWithHosts("c1", "HDP-2.0.7", Arrays.asList("h1"), "centos5");
-
-    Cluster cluster = clusters.getCluster("c1");
-    cluster.setDesiredStackVersion(new StackId("HDP-2.0.7"));
-    cluster.setCurrentStackVersion(new StackId("HDP-2.0.7"));
-
-    Service hdfs = cluster.addService("HDFS");
-    hdfs.persist();
-    hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
-
-    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost("h1").persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost("h1").persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost("h1").persist();
-
-
-
-    Service nagios = cluster.addService("NAGIOS");
-    nagios.persist();
-    nagios.addServiceComponent(Role.NAGIOS_SERVER.name()).persist();
-    nagios.getServiceComponent(Role.NAGIOS_SERVER.name()).addServiceComponentHost("h1").persist();
-
-    installService("c1", "HDFS", false, false);
-    installService("c1", "NAGIOS", false, false);
-
-    startService("c1", "HDFS", false, false);
-    startService("c1", "NAGIOS", false, false);
-
-    // set this after starting - setting it before will skip it due to rules
-    // around bulk starts
-    hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(
-        "h1").setMaintenanceState(MaintenanceState.ON);
-
-    Cluster c = clusters.getCluster("c1");
-    Service s = c.getService("HDFS");
-
-    Assert.assertEquals(State.STARTED, s.getDesiredState());
-    for (ServiceComponent sc : s.getServiceComponents().values()) {
-      for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-        if (sc.isClientComponent()) {
-          Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
-        } else {
-          Assert.assertEquals(State.STARTED, sch.getDesiredState());
-        }
-      }
-    }
-
-    Map<String, String> params = new HashMap<String, String>() {{
-      put("test", "test");
-    }};
-    RequestResourceFilter resourceFilter = new RequestResourceFilter(
-      "NAGIOS",
-      "NAGIOS_SERVER",
-      new ArrayList<String>() {{ add("h1"); }});
-    ExecuteActionRequest actionRequest = new ExecuteActionRequest("c1",
-      "RESTART", params, false);
-    actionRequest.getResourceFilters().add(resourceFilter);
-
-    Map<String, String> requestProperties = new HashMap<String, String>();
-    requestProperties.put(REQUEST_CONTEXT_PROPERTY, "Called from a test");
-
-    RequestStatusResponse response = controller.createAction(actionRequest, requestProperties);
-
-    List<Stage> stages = actionDB.getAllStages(response.getRequestId());
-    Assert.assertNotNull(stages);
-
-    HostRoleCommand hrc = null;
-    for (Stage stage : stages) {
-      for (HostRoleCommand cmd : stage.getOrderedHostRoleCommands()) {
-        if (cmd.getRole().equals(Role.NAGIOS_SERVER)) {
-          hrc = cmd;
-        }
-      }
-    }
-    Assert.assertNotNull(hrc);
-    Assert.assertEquals("RESTART NAGIOS/NAGIOS_SERVER", hrc.getCommandDetail());
-
-
-    Set<Map<String, String>> pi =
-        hrc.getExecutionCommandWrapper().getExecutionCommand().getPassiveInfo();
-
-    Assert.assertNotNull(pi);
-    Assert.assertTrue(pi.size() > 0);
-    Map<String, String> map = pi.iterator().next();
-    Assert.assertTrue(map.containsKey("host"));
-    Assert.assertTrue(map.containsKey("service"));
-    Assert.assertTrue(map.containsKey("component"));
-    Assert.assertEquals("h1", map.get("host"));
-    Assert.assertEquals("HDFS", map.get("service"));
-    Assert.assertEquals("DATANODE", map.get("component"));
-  }
-
-
   @SuppressWarnings("serial")
   @Test
   public void testCreateActionsFailures() throws Exception {
@@ -7090,7 +6994,7 @@ public class AmbariManagementControllerTest {
   public void testGetStackServices() throws Exception {
     StackServiceRequest request = new StackServiceRequest(STACK_NAME, NEW_STACK_VERSION, null);
     Set<StackServiceResponse> responses = controller.getStackServices(Collections.singleton(request));
-    Assert.assertEquals(12, responses.size());
+    Assert.assertEquals(11, responses.size());
 
 
     StackServiceRequest requestWithParams = new StackServiceRequest(STACK_NAME, NEW_STACK_VERSION, SERVICE_NAME);
@@ -7196,8 +7100,7 @@ public class AmbariManagementControllerTest {
   @Test
   public void testStackServiceCheckSupported() throws Exception {
     StackServiceRequest hdfsServiceRequest = new StackServiceRequest(
-        STACK_NAME,
-        NEW_STACK_VERSION, SERVICE_NAME);
+        STACK_NAME, "2.0.8", SERVICE_NAME);
 
     Set<StackServiceResponse> responses = controller.getStackServices(Collections.singleton(hdfsServiceRequest));
     Assert.assertEquals(1, responses.size());
@@ -7205,10 +7108,10 @@ public class AmbariManagementControllerTest {
     StackServiceResponse response = responses.iterator().next();
     assertTrue(response.isServiceCheckSupported());
 
-    StackServiceRequest nagiosServiceRequest = new StackServiceRequest(
-        STACK_NAME, NEW_STACK_VERSION, NAGIOS_SERVICE_NAME);
+    StackServiceRequest fakeServiceRequest = new StackServiceRequest(
+        STACK_NAME, "2.0.8", FAKE_SERVICE_NAME);
 
-    responses = controller.getStackServices(Collections.singleton(nagiosServiceRequest));
+    responses = controller.getStackServices(Collections.singleton(fakeServiceRequest));
     Assert.assertEquals(1, responses.size());
 
     response = responses.iterator().next();
@@ -9601,24 +9504,25 @@ public class AmbariManagementControllerTest {
     String clusterName = "c1";
     createCluster(clusterName);
     Cluster cluster = clusters.getCluster(clusterName);
-    StackId stackId = new StackId("HDP-2.0.5");
+    StackId stackId = new StackId("HDP-2.0.8");
     cluster.setDesiredStackVersion(stackId);
     cluster.setCurrentStackVersion(stackId);
 
     String hdfsService = "HDFS";
-    String nagiosService = "NAGIOS";
+    String fakeMonitoringService = "FAKENAGIOS";
     createService(clusterName, hdfsService, null);
-    createService(clusterName, nagiosService, null);
+    createService(clusterName, fakeMonitoringService, null);
 
     String namenode = "NAMENODE";
     String datanode = "DATANODE";
     String hdfsClient = "HDFS_CLIENT";
-    String nagiosServer = "NAGIOS_SERVER";
+    String fakeServer = "FAKE_MONITORING_SERVER";
+
     createServiceComponent(clusterName, hdfsService, namenode,
       State.INIT);
     createServiceComponent(clusterName, hdfsService, datanode,
       State.INIT);
-    createServiceComponent(clusterName, nagiosService, nagiosServer,
+    createServiceComponent(clusterName, fakeMonitoringService, fakeServer,
       State.INIT);
 
     String host1 = "h1";
@@ -9626,13 +9530,13 @@ public class AmbariManagementControllerTest {
     addHost(host1, clusterName);
     createServiceComponentHost(clusterName, hdfsService, namenode, host1, null);
     createServiceComponentHost(clusterName, hdfsService, datanode, host1, null);
-    createServiceComponentHost(clusterName, nagiosService, nagiosServer, host1,
+    createServiceComponentHost(clusterName, fakeMonitoringService, fakeServer, host1,
       null);
 
 
     ServiceComponentHost nagiosSch = null;
     for (ServiceComponentHost sch : cluster.getServiceComponentHosts(host1)) {
-      if (sch.getServiceComponentName().equals(nagiosServer)) {
+      if (sch.getServiceComponentName().equals(fakeServer)) {
         nagiosSch = sch;
       }
     }
@@ -9640,6 +9544,7 @@ public class AmbariManagementControllerTest {
 
     createServiceComponent(clusterName, hdfsService, hdfsClient,
       State.INIT);
+
     createServiceComponentHost(clusterName, hdfsService, hdfsClient, host1, null);
 
     assertTrue(nagiosSch.isRestartRequired());
@@ -9703,25 +9608,19 @@ public class AmbariManagementControllerTest {
   public void testMaintenanceState() throws Exception {
     String clusterName = "c1";
     createCluster(clusterName);
-    clusters.getCluster(clusterName)
-        .setDesiredStackVersion(new StackId("HDP-1.2.0"));
+    clusters.getCluster(clusterName).setDesiredStackVersion(
+        new StackId("HDP-1.2.0"));
+
     String serviceName = "HDFS";
-    String nagiosService = "NAGIOS";
     createService(clusterName, serviceName, null);
-    createService(clusterName, nagiosService, null);
 
     String componentName1 = "NAMENODE";
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
-    String componentName4 = "NAGIOS_SERVER";
-    createServiceComponent(clusterName, serviceName, componentName1,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName, componentName2,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName, componentName3,
-        State.INIT);
-    createServiceComponent(clusterName, nagiosService, componentName4,
-        State.INIT);
+
+    createServiceComponent(clusterName, serviceName, componentName1, State.INIT);
+    createServiceComponent(clusterName, serviceName, componentName2, State.INIT);
+    createServiceComponent(clusterName, serviceName, componentName3, State.INIT);
 
     String host1 = "h1";
     String host2 = "h2";
@@ -9729,10 +9628,12 @@ public class AmbariManagementControllerTest {
     addHost(host1, clusterName);
     addHost(host2, clusterName);
 
-    createServiceComponentHost(clusterName, serviceName, componentName1, host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName2, host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName2, host2, null);
-    createServiceComponentHost(clusterName, nagiosService, componentName4, host1, null);
+    createServiceComponentHost(clusterName, serviceName, componentName1, host1,
+        null);
+    createServiceComponentHost(clusterName, serviceName, componentName2, host1,
+        null);
+    createServiceComponentHost(clusterName, serviceName, componentName2, host2,
+        null);
 
     Map<String, String> requestProperties = new HashMap<String, String>();
     requestProperties.put("context", "Called from a test");
@@ -9741,14 +9642,14 @@ public class AmbariManagementControllerTest {
     Service service = cluster.getService(serviceName);
     Map<String, Host> hosts = clusters.getHostsForCluster(clusterName);
 
-    MaintenanceStateHelper maintenanceStateHelper =
-            MaintenanceStateHelperTest.getMaintenanceStateHelperInstance(clusters);
+    MaintenanceStateHelper maintenanceStateHelper = MaintenanceStateHelperTest.getMaintenanceStateHelperInstance(clusters);
 
     // test updating a service
     ServiceRequest sr = new ServiceRequest(clusterName, serviceName, null);
     sr.setMaintenanceState(MaintenanceState.ON.name());
-    ServiceResourceProviderTest.updateServices(controller, Collections.singleton(sr),
-        requestProperties, false, false, maintenanceStateHelper);
+    ServiceResourceProviderTest.updateServices(controller,
+        Collections.singleton(sr), requestProperties, false, false,
+        maintenanceStateHelper);
     Assert.assertEquals(MaintenanceState.ON, service.getMaintenanceState());
 
     // check the host components implied state vs desired state
@@ -9762,8 +9663,9 @@ public class AmbariManagementControllerTest {
 
     // reset
     sr.setMaintenanceState(MaintenanceState.OFF.name());
-    ServiceResourceProviderTest.updateServices(controller, Collections.singleton(sr),
-        requestProperties, false, false, maintenanceStateHelper);
+    ServiceResourceProviderTest.updateServices(controller,
+        Collections.singleton(sr), requestProperties, false, false,
+        maintenanceStateHelper);
     Assert.assertEquals(MaintenanceState.OFF, service.getMaintenanceState());
 
     // check the host components implied state vs desired state
@@ -9782,9 +9684,11 @@ public class AmbariManagementControllerTest {
         new HashMap<String, String>());
 
     Host host = hosts.get(host1);
-    Assert.assertEquals(MaintenanceState.ON, host.getMaintenanceState(cluster.getClusterId()));
+    Assert.assertEquals(MaintenanceState.ON,
+        host.getMaintenanceState(cluster.getClusterId()));
 
-    // check the host components implied state vs desired state, only for affected hosts
+    // check the host components implied state vs desired state, only for
+    // affected hosts
     for (ServiceComponent sc : service.getServiceComponents().values()) {
       for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
         MaintenanceState implied = controller.getEffectiveMaintenanceState(sch);
@@ -9803,7 +9707,8 @@ public class AmbariManagementControllerTest {
         new HashMap<String, String>());
 
     host = hosts.get(host1);
-    Assert.assertEquals(MaintenanceState.OFF, host.getMaintenanceState(cluster.getClusterId()));
+    Assert.assertEquals(MaintenanceState.OFF,
+        host.getMaintenanceState(cluster.getClusterId()));
 
     // check the host components active state vs desired state
     for (ServiceComponent sc : service.getServiceComponents().values()) {
@@ -9822,12 +9727,15 @@ public class AmbariManagementControllerTest {
     Set<HostRequest> set = new HashSet<HostRequest>();
     set.add(hr1);
     set.add(hr2);
-    HostResourceProviderTest.updateHosts(controller, set, new HashMap<String, String>());
+    HostResourceProviderTest.updateHosts(controller, set,
+        new HashMap<String, String>());
 
     host = hosts.get(host1);
-    Assert.assertEquals(MaintenanceState.ON, host.getMaintenanceState(cluster.getClusterId()));
+    Assert.assertEquals(MaintenanceState.ON,
+        host.getMaintenanceState(cluster.getClusterId()));
     host = hosts.get(host2);
-    Assert.assertEquals(MaintenanceState.ON, host.getMaintenanceState(cluster.getClusterId()));
+    Assert.assertEquals(MaintenanceState.ON,
+        host.getMaintenanceState(cluster.getClusterId()));
 
     // reset
     hr1 = new HostRequest(host1, clusterName, requestProperties);
@@ -9838,43 +9746,49 @@ public class AmbariManagementControllerTest {
     set.add(hr1);
     set.add(hr2);
 
-    HostResourceProviderTest.updateHosts(controller, set, new HashMap<String, String>());
+    HostResourceProviderTest.updateHosts(controller, set,
+        new HashMap<String, String>());
     host = hosts.get(host1);
-    Assert.assertEquals(MaintenanceState.OFF, host.getMaintenanceState(cluster.getClusterId()));
+    Assert.assertEquals(MaintenanceState.OFF,
+        host.getMaintenanceState(cluster.getClusterId()));
     host = hosts.get(host2);
-    Assert.assertEquals(MaintenanceState.OFF, host.getMaintenanceState(cluster.getClusterId()));
-
+    Assert.assertEquals(MaintenanceState.OFF,
+        host.getMaintenanceState(cluster.getClusterId()));
 
     // only do one SCH
-    ServiceComponentHost targetSch = service.getServiceComponent(
-        componentName2).getServiceComponentHosts().get(host2);
+    ServiceComponentHost targetSch = service.getServiceComponent(componentName2).getServiceComponentHosts().get(
+        host2);
     Assert.assertNotNull(targetSch);
     targetSch.setMaintenanceState(MaintenanceState.ON);
 
     // check the host components active state vs desired state
-    Assert.assertEquals(MaintenanceState.ON, controller.getEffectiveMaintenanceState(targetSch));
+    Assert.assertEquals(MaintenanceState.ON,
+        controller.getEffectiveMaintenanceState(targetSch));
 
     // update the service
     service.setMaintenanceState(MaintenanceState.ON);
-    Assert.assertEquals(MaintenanceState.ON, controller.getEffectiveMaintenanceState(targetSch));
+    Assert.assertEquals(MaintenanceState.ON,
+        controller.getEffectiveMaintenanceState(targetSch));
 
     // make SCH active
     targetSch.setMaintenanceState(MaintenanceState.OFF);
     Assert.assertEquals(MaintenanceState.IMPLIED_FROM_SERVICE,
-      controller.getEffectiveMaintenanceState(targetSch));
+        controller.getEffectiveMaintenanceState(targetSch));
 
     // update the service
     service.setMaintenanceState(MaintenanceState.OFF);
-    Assert.assertEquals(MaintenanceState.OFF, controller.getEffectiveMaintenanceState(targetSch));
+    Assert.assertEquals(MaintenanceState.OFF,
+        controller.getEffectiveMaintenanceState(targetSch));
 
     host = hosts.get(host2);
     // update host
     host.setMaintenanceState(cluster.getClusterId(), MaintenanceState.ON);
     Assert.assertEquals(MaintenanceState.IMPLIED_FROM_HOST,
-      controller.getEffectiveMaintenanceState(targetSch));
+        controller.getEffectiveMaintenanceState(targetSch));
 
     targetSch.setMaintenanceState(MaintenanceState.ON);
-    Assert.assertEquals(MaintenanceState.ON, controller.getEffectiveMaintenanceState(targetSch));
+    Assert.assertEquals(MaintenanceState.ON,
+        controller.getEffectiveMaintenanceState(targetSch));
 
     // check the host components active state vs desired state
     for (ServiceComponent sc : service.getServiceComponents().values()) {
@@ -9883,17 +9797,13 @@ public class AmbariManagementControllerTest {
       }
     }
 
-    long id1 = installService(clusterName, serviceName, false, false, maintenanceStateHelper);
-    long id2 = installService(clusterName, nagiosService, false, false, maintenanceStateHelper);
+    long id1 = installService(clusterName, serviceName, false, false,
+        maintenanceStateHelper);
 
     List<HostRoleCommand> hdfsCmds = actionDB.getRequestTasks(id1);
-    List<HostRoleCommand> nagiosCmds = actionDB.getRequestTasks(id2);
-
     Assert.assertNotNull(hdfsCmds);
-    Assert.assertNotNull(nagiosCmds);
 
     HostRoleCommand datanodeCmd = null;
-    HostRoleCommand nagiosCmd = null;
 
     for (HostRoleCommand cmd : hdfsCmds) {
       if (cmd.getRole().equals(Role.DATANODE)) {
@@ -9901,19 +9811,7 @@ public class AmbariManagementControllerTest {
       }
     }
 
-    for (HostRoleCommand cmd : nagiosCmds) {
-      if (cmd.getRole().equals(Role.NAGIOS_SERVER)) {
-        nagiosCmd = cmd;
-      }
-    }
-
     Assert.assertNotNull(datanodeCmd);
-    Assert.assertNotNull(nagiosCmd);
-    Assert.assertNotNull(nagiosCmd.getExecutionCommandWrapper()
-      .getExecutionCommand().getPassiveInfo());
-    Assert.assertEquals(Integer.valueOf(1),
-      Integer.valueOf(nagiosCmd.getExecutionCommandWrapper()
-        .getExecutionCommand().getPassiveInfo().size()));
 
     // verify passive sch was skipped
     for (ServiceComponent sc : service.getServiceComponents().values()) {
@@ -9922,10 +9820,10 @@ public class AmbariManagementControllerTest {
       }
 
       for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-        Assert.assertEquals(sch == targetSch ? State.INIT : State.INSTALLED, sch.getState());
+        Assert.assertEquals(sch == targetSch ? State.INIT : State.INSTALLED,
+            sch.getState());
       }
     }
-
   }
 
   @Test

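The maintenance-state assertions above imply a resolution order: an explicit ON
on the host component wins outright, an ON service is reported as
IMPLIED_FROM_SERVICE, and an ON host as IMPLIED_FROM_HOST. A hypothetical
sketch of that precedence, reconstructed from this test's assertions rather
than from the controller's actual implementation:

    // Assumed precedence, inferred only from testMaintenanceState() above.
    static MaintenanceState effectiveState(MaintenanceState componentState,
        MaintenanceState serviceState, MaintenanceState hostState) {
      if (componentState == MaintenanceState.ON) {
        return MaintenanceState.ON; // explicit component state wins
      }
      if (serviceState == MaintenanceState.ON) {
        return MaintenanceState.IMPLIED_FROM_SERVICE;
      }
      if (hostState == MaintenanceState.ON) {
        return MaintenanceState.IMPLIED_FROM_HOST;
      }
      return MaintenanceState.OFF;
    }
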
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java
index dcb793e..c9b7bdd 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java
@@ -17,6 +17,19 @@
  */
 package org.apache.ambari.server.controller.ganglia;
 
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.ambari.server.configuration.ComponentSSLConfiguration;
 import org.apache.ambari.server.configuration.ComponentSSLConfigurationTest;
 import org.apache.ambari.server.controller.internal.PropertyInfo;
@@ -38,19 +51,6 @@ import org.junit.runners.Parameterized;
 import org.powermock.api.easymock.PowerMock;
 import org.powermock.core.classloader.annotations.PrepareForTest;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.expect;
-
 /**
  * Test the Ganglia property provider.
  */
@@ -68,21 +68,21 @@ public class GangliaPropertyProviderTest {
   private static final String CLUSTER_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
   private static final String HOST_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "host_name");
   private static final String COMPONENT_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "component_name");
-  
 
-  
+
+
   private ComponentSSLConfiguration configuration;
 
   @Parameterized.Parameters
   public static Collection<Object[]> configs() {
-    ComponentSSLConfiguration configuration1 =
-        ComponentSSLConfigurationTest.getConfiguration("tspath", "tspass", "tstype", false, false);
+    ComponentSSLConfiguration configuration1 = ComponentSSLConfigurationTest.getConfiguration(
+        "tspath", "tspass", "tstype", false);
 
-    ComponentSSLConfiguration configuration2 =
-        ComponentSSLConfigurationTest.getConfiguration("tspath", "tspass", "tstype", true, false);
+    ComponentSSLConfiguration configuration2 = ComponentSSLConfigurationTest.getConfiguration(
+        "tspath", "tspass", "tstype", true);
 
-    ComponentSSLConfiguration configuration3 =
-        ComponentSSLConfigurationTest.getConfiguration("tspath", "tspass", "tstype", false, true);
+    ComponentSSLConfiguration configuration3 = ComponentSSLConfigurationTest.getConfiguration(
+        "tspath", "tspass", "tstype", false);
 
     return Arrays.asList(new Object[][]{
         {configuration1},
@@ -159,17 +159,17 @@ public class GangliaPropertyProviderTest {
 
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
-    
+
     List<String> metricsRegexes = new ArrayList<String>();
-    
+
     metricsRegexes.add("metrics/mapred/shuffleOutput/shuffle_exceptions_caught");
     metricsRegexes.add("metrics/mapred/shuffleOutput/shuffle_failed_outputs");
     metricsRegexes.add("metrics/mapred/shuffleOutput/shuffle_output_bytes");
     metricsRegexes.add("metrics/mapred/shuffleOutput/shuffle_success_outputs");
-    
-    
+
+
     String metricsList = getMetricsRegexes(metricsRegexes, gangliaPropertyIds, "TASKTRACKER");
-    
+
     URIBuilder expectedUri = new URIBuilder();
 
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
@@ -181,14 +181,14 @@ public class GangliaPropertyProviderTest {
     expectedUri.setParameter("s", "10");
     expectedUri.setParameter("e", "20");
     expectedUri.setParameter("r", "1");
-    
+
 
     URIBuilder actualUri = new URIBuilder(streamProvider.getLastSpec());
 
     Assert.assertEquals(expectedUri.getScheme(), actualUri.getScheme());
     Assert.assertEquals(expectedUri.getHost(), actualUri.getHost());
     Assert.assertEquals(expectedUri.getPath(), actualUri.getPath());
-    
+
     Assert.assertTrue(isUrlParamsEquals(actualUri, expectedUri));
 
     Assert.assertEquals(6, PropertyHelper.getProperties(resource).size());
@@ -207,7 +207,7 @@ public class GangliaPropertyProviderTest {
     Assert.assertNotNull(resource.getPropertyValue(shuffle_output_bytes));
     Assert.assertNotNull(resource.getPropertyValue(shuffle_success_outputs));
   }
-  
+
   @Test
   public void testPopulateResources_checkHostComponent() throws Exception {
     TestStreamProvider streamProvider  = new TestStreamProvider("temporal_ganglia_data.txt");
@@ -233,20 +233,20 @@ public class GangliaPropertyProviderTest {
     Map<String, TemporalInfo> temporalInfoMap = new HashMap<String, TemporalInfo>();
     temporalInfoMap.put(PROPERTY_ID, new TemporalInfoImpl(10L, 20L, 1L));
     Request  request = PropertyHelper.getReadRequest(Collections.singleton(PROPERTY_ID), temporalInfoMap);
-    
+
     expect(hostProvider.getGangliaCollectorHostName(anyObject(String.class))).andReturn("ganglia-host");
     expect(hostProvider.isGangliaCollectorComponentLive(anyObject(String.class))).andReturn(true).once();
     expect(hostProvider.isGangliaCollectorHostLive(anyObject(String.class))).andReturn(true).once();
-    
-    
+
+
     PowerMock.replay(hostProvider);
-    
+
     Set<Resource> populateResources = propertyProvider.populateResources(Collections.singleton(resource), request, null);
-    
+
     PowerMock.verify(hostProvider);
-    
+
     Assert.assertEquals(1, populateResources.size());
-    
+
   }
 
   @Test
@@ -317,7 +317,7 @@ public class GangliaPropertyProviderTest {
     Request  request = PropertyHelper.getReadRequest(Collections.singleton(PROPERTY_ID), temporalInfoMap);
 
     Assert.assertEquals(3, propertyProvider.populateResources(resources, request, null).size());
-    
+
     URIBuilder uriBuilder = new URIBuilder();
 
     uriBuilder.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
@@ -331,7 +331,7 @@ public class GangliaPropertyProviderTest {
     uriBuilder.setParameter("r", "1");
 
     String expected = uriBuilder.toString();
-    
+
     Assert.assertEquals(expected, streamProvider.getLastSpec());
 
     for (Resource res : resources) {
@@ -357,16 +357,17 @@ public class GangliaPropertyProviderTest {
     Set<Resource> resources = new HashSet<Resource>();
 
     StringBuilder hostsList = new StringBuilder();
-    
+
     for (int i = 0; i < 150; ++i) {
       Resource resource = new ResourceImpl(Resource.Type.Host);
       resource.setProperty(HOST_NAME_PROPERTY_ID, "host" + i);
       resources.add(resource);
-      
-      if (hostsList.length() != 0)
+
+      if (hostsList.length() != 0) {
         hostsList.append("," + "host" + i );
-      else
-        hostsList.append("host" + i); 
+      } else {
+        hostsList.append("host" + i);
+      }
     }
 
     // only ask for one property
@@ -376,29 +377,29 @@ public class GangliaPropertyProviderTest {
 
     Assert.assertEquals(150, propertyProvider.populateResources(resources, request, null).size());
 
-    
+
     URIBuilder expectedUri = new URIBuilder();
-    
+
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPJobTracker,HDPHBaseMaster,HDPKafka,HDPResourceManager,HDPFlumeServer,HDPSlaves,HDPHistoryServer,HDPJournalNode,HDPTaskTracker,HDPHBaseRegionServer,HDPNameNode");
-   
+
     expectedUri.setParameter("h", hostsList.toString());
     expectedUri.setParameter("m", "jvm.metrics.gcCount");
     expectedUri.setParameter("s", "10");
     expectedUri.setParameter("e", "20");
     expectedUri.setParameter("r", "1");
-    
+
     URIBuilder actualUri = new URIBuilder(streamProvider.getLastSpec());
-    
+
     Assert.assertEquals(expectedUri.getScheme(), actualUri.getScheme());
     Assert.assertEquals(expectedUri.getHost(), actualUri.getHost());
     Assert.assertEquals(expectedUri.getPath(), actualUri.getPath());
-    
+
     Assert.assertTrue(isUrlParamsEquals(actualUri, expectedUri));
   }
-  
+
   @Test
   public void testPopulateResources_params() throws Exception {
     TestStreamProvider streamProvider  = new TestStreamProvider("flume_ganglia_data.txt");
@@ -428,13 +429,13 @@ public class GangliaPropertyProviderTest {
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
     List<String> metricsRegexes = new ArrayList<String>();
-    
+
     metricsRegexes.add(FLUME_CHANNEL_CAPACITY_PROPERTY);
 
     String metricsList = getMetricsRegexes(metricsRegexes, gangliaPropertyIds, "FLUME_HANDLER");
-    
+
     URIBuilder expectedUri = new URIBuilder();
-    
+
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
@@ -444,15 +445,15 @@ public class GangliaPropertyProviderTest {
     expectedUri.setParameter("s", "10");
     expectedUri.setParameter("e", "20");
     expectedUri.setParameter("r", "1");
-    
+
     URIBuilder actualUri = new URIBuilder(streamProvider.getLastSpec());
 
     Assert.assertEquals(expectedUri.getScheme(), actualUri.getScheme());
     Assert.assertEquals(expectedUri.getHost(), actualUri.getHost());
     Assert.assertEquals(expectedUri.getPath(), actualUri.getPath());
-    
-    Assert.assertTrue(isUrlParamsEquals(actualUri, expectedUri));    
-    
+
+    Assert.assertTrue(isUrlParamsEquals(actualUri, expectedUri));
+
     Assert.assertEquals(3, PropertyHelper.getProperties(resource).size());
     Assert.assertNotNull(resource.getPropertyValue(FLUME_CHANNEL_CAPACITY_PROPERTY));
   }
@@ -471,7 +472,7 @@ public class GangliaPropertyProviderTest {
         CLUSTER_NAME_PROPERTY_ID,
         HOST_NAME_PROPERTY_ID,
         COMPONENT_NAME_PROPERTY_ID);
-    
+
     // flume
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
 
@@ -490,12 +491,12 @@ public class GangliaPropertyProviderTest {
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
     List<String> metricsRegexes = new ArrayList<String>();
-    
+
     metricsRegexes.add("metrics/flume");
     metricsRegexes.add("metrics/cpu/cpu_wio");
-    
+
     String metricsList = getMetricsRegexes(metricsRegexes, gangliaPropertyIds, "FLUME_HANDLER");
-    
+
     URIBuilder expectedUri = new URIBuilder();
 
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
@@ -506,15 +507,15 @@ public class GangliaPropertyProviderTest {
     expectedUri.setParameter("m", metricsList);
     expectedUri.setParameter("e", "now");
     expectedUri.setParameter("pt", "true");
-    
+
     URIBuilder actualUri = new URIBuilder(streamProvider.getLastSpec());
 
     Assert.assertEquals(expectedUri.getScheme(), actualUri.getScheme());
     Assert.assertEquals(expectedUri.getHost(), actualUri.getHost());
     Assert.assertEquals(expectedUri.getPath(), actualUri.getPath());
-    
+
     Assert.assertTrue(isUrlParamsEquals(actualUri, expectedUri));
-       
+
     Assert.assertEquals(22, PropertyHelper.getProperties(resource).size());
     Assert.assertNotNull(resource.getPropertyValue(PROPERTY_ID2));
     Assert.assertNotNull(resource.getPropertyValue(FLUME_CHANNEL_CAPACITY_PROPERTY));
@@ -547,7 +548,7 @@ public class GangliaPropertyProviderTest {
 
     String expected = (configuration.isGangliaSSL() ? "https" : "http") +
         "://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPFlumeServer%2CHDPSlaves&h=ip-10-39-113-33.ec2.internal&m=";
-    
+
     Assert.assertTrue(streamProvider.getLastSpec().startsWith(expected));
 
     Assert.assertEquals(33, PropertyHelper.getProperties(resource).size());
@@ -583,11 +584,11 @@ public class GangliaPropertyProviderTest {
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
     List<String> metricsRegexes = new ArrayList<String>();
-    
+
     metricsRegexes.add("metrics/flume");
-    
+
     String metricsList = getMetricsRegexes(metricsRegexes, gangliaPropertyIds, "FLUME_HANDLER");
-    
+
     URIBuilder expectedUri = new URIBuilder();
 
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
@@ -599,14 +600,14 @@ public class GangliaPropertyProviderTest {
     expectedUri.setParameter("s", "10");
     expectedUri.setParameter("e", "20");
     expectedUri.setParameter("r", "1");
-    
+
     URIBuilder actualUri = new URIBuilder(streamProvider.getLastSpec());
 
     Assert.assertEquals(expectedUri.getScheme(), actualUri.getScheme());
     Assert.assertEquals(expectedUri.getHost(), actualUri.getHost());
     Assert.assertEquals(expectedUri.getPath(), actualUri.getPath());
-    
-    Assert.assertTrue(isUrlParamsEquals(actualUri, expectedUri));    
+
+    Assert.assertTrue(isUrlParamsEquals(actualUri, expectedUri));
 
     Assert.assertEquals(21, PropertyHelper.getProperties(resource).size());
     Assert.assertNotNull(resource.getPropertyValue(FLUME_CHANNEL_CAPACITY_PROPERTY));
@@ -641,11 +642,11 @@ public class GangliaPropertyProviderTest {
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
     List<String> metricsRegexes = new ArrayList<String>();
-    
+
     metricsRegexes.add("metrics/flume/");
-    
+
     String metricsList = getMetricsRegexes(metricsRegexes, gangliaPropertyIds, "FLUME_HANDLER");
-    
+
     URIBuilder expectedUri = new URIBuilder();
 
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
@@ -657,13 +658,13 @@ public class GangliaPropertyProviderTest {
     expectedUri.setParameter("s", "10");
     expectedUri.setParameter("e", "20");
     expectedUri.setParameter("r", "1");
-    
+
     URIBuilder actualUri = new URIBuilder(streamProvider.getLastSpec());
 
     Assert.assertEquals(expectedUri.getScheme(), actualUri.getScheme());
     Assert.assertEquals(expectedUri.getHost(), actualUri.getHost());
     Assert.assertEquals(expectedUri.getPath(), actualUri.getPath());
-    
+
     Assert.assertTrue(isUrlParamsEquals(actualUri, expectedUri));
 
     Assert.assertEquals(21, PropertyHelper.getProperties(resource).size());
@@ -697,14 +698,14 @@ public class GangliaPropertyProviderTest {
     Request  request = PropertyHelper.getReadRequest(Collections.singleton(FLUME_CATEGORY3), temporalInfoMap);
 
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
-    
+
     List<String> metricsRegexes = new ArrayList<String>();
-    
+
     metricsRegexes.add("metrics/flume/$1/CHANNEL/$2/");
     metricsRegexes.add(FLUME_CHANNEL_CAPACITY_PROPERTY);
 
     String metricsList = getMetricsRegexes(metricsRegexes, gangliaPropertyIds, "FLUME_HANDLER");
-    
+
     URIBuilder expectedUri = new URIBuilder();
 
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
@@ -716,14 +717,14 @@ public class GangliaPropertyProviderTest {
     expectedUri.setParameter("s", "10");
     expectedUri.setParameter("e", "20");
     expectedUri.setParameter("r", "1");
-    
+
     URIBuilder actualUri = new URIBuilder(streamProvider.getLastSpec());
 
     Assert.assertEquals(expectedUri.getScheme(), actualUri.getScheme());
     Assert.assertEquals(expectedUri.getHost(), actualUri.getHost());
     Assert.assertEquals(expectedUri.getPath(), actualUri.getPath());
-    
-    Assert.assertTrue(isUrlParamsEquals(actualUri, expectedUri));    
+
+    Assert.assertTrue(isUrlParamsEquals(actualUri, expectedUri));
 
     Assert.assertEquals(11, PropertyHelper.getProperties(resource).size());
     Assert.assertNotNull(resource.getPropertyValue(FLUME_CHANNEL_CAPACITY_PROPERTY));
@@ -756,14 +757,14 @@ public class GangliaPropertyProviderTest {
     Request  request = PropertyHelper.getReadRequest(Collections.singleton(FLUME_CATEGORY4), temporalInfoMap);
 
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
-    
+
     List<String> metricsRegexes = new ArrayList<String>();
-    
+
     metricsRegexes.add("metrics/flume/$1/CHANNEL/$2");
     metricsRegexes.add(FLUME_CHANNEL_CAPACITY_PROPERTY);
 
     String metricsList = getMetricsRegexes(metricsRegexes, gangliaPropertyIds, "FLUME_HANDLER");
-    
+
     URIBuilder expectedUri = new URIBuilder();
 
     expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
@@ -775,57 +776,59 @@ public class GangliaPropertyProviderTest {
     expectedUri.setParameter("s", "10");
     expectedUri.setParameter("e", "20");
     expectedUri.setParameter("r", "1");
-    
+
     URIBuilder actualUri = new URIBuilder(streamProvider.getLastSpec());
 
     Assert.assertEquals(expectedUri.getScheme(), actualUri.getScheme());
     Assert.assertEquals(expectedUri.getHost(), actualUri.getHost());
     Assert.assertEquals(expectedUri.getPath(), actualUri.getPath());
-    
-    Assert.assertTrue(isUrlParamsEquals(actualUri, expectedUri));    
-    
+
+    Assert.assertTrue(isUrlParamsEquals(actualUri, expectedUri));
+
     Assert.assertEquals(11, PropertyHelper.getProperties(resource).size());
     Assert.assertNotNull(resource.getPropertyValue(FLUME_CHANNEL_CAPACITY_PROPERTY));
   }
-  
 
 
 
-  
+
+
   private boolean isUrlParamsEquals(URIBuilder actualUri, URIBuilder expectedUri) {
     for (final NameValuePair expectedParam : expectedUri.getQueryParams()) {
       NameValuePair actualParam = (NameValuePair) CollectionUtils.find(actualUri.getQueryParams(), new Predicate() {
-        
+
         @Override
         public boolean evaluate(Object arg0) {
-          if (!(arg0 instanceof NameValuePair))
+          if (!(arg0 instanceof NameValuePair)) {
             return false;
-          
+          }
+
           NameValuePair otherObj = (NameValuePair) arg0;
           return otherObj.getName().equals(expectedParam.getName());
         }
       });
-      
+
 
       List<String> actualParamList = new ArrayList<String>(Arrays.asList(actualParam.getValue().split(",")));
       List<String> expectedParamList = new ArrayList<String>(Arrays.asList(expectedParam.getValue().split(",")));
-      
+
       Collections.sort(actualParamList);
       Collections.sort(expectedParamList);
-      
-      if (!actualParamList.equals(expectedParamList))
+
+      if (!actualParamList.equals(expectedParamList)) {
         return false;
+      }
     }
-    
+
     return true;
   }
-  
+
   private String getMetricsRegexes(List<String> metricsRegexes,
       Map<String, Map<String, PropertyInfo>> gangliaPropertyIds,
       String componentName) {
-    
+
     StringBuilder metricsBuilder = new StringBuilder();
-    
+
     for (Map.Entry<String, PropertyInfo> entry : gangliaPropertyIds.get(componentName).entrySet())
     {
       for (String metricRegex: metricsRegexes)
@@ -842,7 +845,7 @@ public class GangliaPropertyProviderTest {
 
     private boolean isHostLive;
     private boolean isComponentLive;
-    
+
     public TestGangliaHostProvider() {
       this(true, true);
     }

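Aside: the cleanup above is whitespace-only, so isUrlParamsEquals() is behaviorally untouched. Its core is an order-insensitive comparison of comma-separated parameter values; a self-contained sketch of that idea, using only java.util instead of the test's URIBuilder/CollectionUtils machinery:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class ParamComparisonSketch {

      // Compare two comma-separated values irrespective of element order,
      // mirroring what isUrlParamsEquals() does per query parameter.
      static boolean sameValueSet(String expected, String actual) {
        List<String> expectedList = new ArrayList<String>(Arrays.asList(expected.split(",")));
        List<String> actualList = new ArrayList<String>(Arrays.asList(actual.split(",")));
        Collections.sort(expectedList);
        Collections.sort(actualList);
        return expectedList.equals(actualList);
      }

      public static void main(String[] args) {
        System.out.println(sameValueSet("host1,host2", "host2,host1")); // true
        System.out.println(sameValueSet("host1,host2", "host1,host3")); // false
      }
    }

Sorting before comparing is what lets these tests accept any host ordering in the rrd.py "h" parameter.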
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaReportPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaReportPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaReportPropertyProviderTest.java
index 1ae17b3..b8609c2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaReportPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaReportPropertyProviderTest.java
@@ -17,6 +17,12 @@
  */
 package org.apache.ambari.server.controller.ganglia;
 
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.ambari.server.configuration.ComponentSSLConfiguration;
 import org.apache.ambari.server.configuration.ComponentSSLConfigurationTest;
 import org.apache.ambari.server.controller.internal.ResourceImpl;
@@ -31,12 +37,6 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
 /**
  * Test the Ganglia report property provider.
  */
@@ -50,14 +50,14 @@ public class GangliaReportPropertyProviderTest {
 
   @Parameterized.Parameters
   public static Collection<Object[]> configs() {
-    ComponentSSLConfiguration configuration1 =
-        ComponentSSLConfigurationTest.getConfiguration("tspath", "tspass", "tstype", false, false);
+    ComponentSSLConfiguration configuration1 = ComponentSSLConfigurationTest.getConfiguration(
+        "tspath", "tspass", "tstype", false);
 
-    ComponentSSLConfiguration configuration2 =
-        ComponentSSLConfigurationTest.getConfiguration("tspath", "tspass", "tstype", true, false);
+    ComponentSSLConfiguration configuration2 = ComponentSSLConfigurationTest.getConfiguration(
+        "tspath", "tspass", "tstype", true);
 
-    ComponentSSLConfiguration configuration3 =
-        ComponentSSLConfigurationTest.getConfiguration("tspath", "tspass", "tstype", false, true);
+    ComponentSSLConfiguration configuration3 = ComponentSSLConfigurationTest.getConfiguration(
+        "tspath", "tspass", "tstype", false);
 
     return Arrays.asList(new Object[][]{
         {configuration1},

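Note that with the old Nagios SSL flag dropped from getConfiguration(), configuration3 is now built with exactly the same arguments as configuration1, so the third parameterized run duplicates the first. If that redundancy is unintended, duplicate parameter rows are easy to collapse; a minimal sketch in plain Java, with hypothetical rows standing in for the SSL configurations:

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    public class DedupeParamsSketch {
      public static void main(String[] args) {
        // Hypothetical parameter rows mirroring the three configurations above.
        Collection<Object[]> rows = Arrays.asList(
            new Object[] { "tspath", "tspass", "tstype", false },
            new Object[] { "tspath", "tspass", "tstype", true },
            new Object[] { "tspath", "tspass", "tstype", false }); // same as row 1

        // Wrapping each row in a List gives value-based equality, so a
        // LinkedHashSet drops the duplicate while preserving insertion order.
        Set<List<Object>> unique = new LinkedHashSet<List<Object>>();
        for (Object[] row : rows) {
          unique.add(Arrays.asList(row));
        }
        System.out.println(unique.size()); // prints 2
      }
    }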
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProviderTest.java
index 9053167..07ff7fe 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProviderTest.java
@@ -18,16 +18,17 @@
 
 package org.apache.ambari.server.controller.gsinstaller;
 
+import java.util.HashMap;
+import java.util.Set;
+
 import junit.framework.Assert;
+
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.junit.Test;
 
-import java.util.HashMap;
-import java.util.Set;
-
 /**
  * Tests for GSInstallerComponentProvider.
  */
@@ -38,7 +39,7 @@ public class GSInstallerComponentProviderTest {
     ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
     GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
     Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
-    Assert.assertEquals(25, resources.size());
+    Assert.assertEquals(24, resources.size());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProviderTest.java
index a4f04e9..c9878f5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProviderTest.java
@@ -18,16 +18,17 @@
 
 package org.apache.ambari.server.controller.gsinstaller;
 
+import java.util.HashMap;
+import java.util.Set;
+
 import junit.framework.Assert;
+
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.junit.Test;
 
-import java.util.HashMap;
-import java.util.Set;
-
 /**
  *
  */
@@ -38,7 +39,7 @@ public class GSInstallerHostComponentProviderTest {
     ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
     GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
     Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
-    Assert.assertEquals(33, resources.size());
+    Assert.assertEquals(32, resources.size());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProviderTest.java
index e871eba..3fd4bd7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProviderTest.java
@@ -18,16 +18,17 @@
 
 package org.apache.ambari.server.controller.gsinstaller;
 
+import java.util.HashMap;
+import java.util.Set;
+
 import junit.framework.Assert;
+
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.junit.Test;
 
-import java.util.HashMap;
-import java.util.Set;
-
 /**
  *
  */
@@ -38,24 +39,38 @@ public class GSInstallerServiceProviderTest {
     ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
     GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
     Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
-    Assert.assertEquals(12, resources.size());
+    Assert.assertEquals(11, resources.size());
   }
 
   @Test
   public void testGetResourcesWithPredicate() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("MAPREDUCE").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    ClusterDefinition clusterDefinition = new ClusterDefinition(
+        new TestGSInstallerStateProvider());
+    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(
+        clusterDefinition);
+    Predicate predicate = new PredicateBuilder().property(
+        GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals(
+        "MAPREDUCE").toPredicate();
+    Set<Resource> resources = provider.getResources(
+        PropertyHelper.getReadRequest(), predicate);
     Assert.assertEquals(1, resources.size());
 
-    predicate = new PredicateBuilder().property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("GANGLIA").or().
-        property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("NAGIOS").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    predicate = new PredicateBuilder().property(
+        GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals(
+        "HDFS").or().property(
+        GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals(
+        "GANGLIA").toPredicate();
+
+    resources = provider.getResources(PropertyHelper.getReadRequest(),
+        predicate);
+
     Assert.assertEquals(2, resources.size());
 
-    predicate = new PredicateBuilder().property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("NO SERVICE").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    predicate = new PredicateBuilder().property(
+        GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals(
+        "NO SERVICE").toPredicate();
+    resources = provider.getResources(PropertyHelper.getReadRequest(),
+        predicate);
     Assert.assertTrue(resources.isEmpty());
   }
 

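The predicate chains above were only reflowed for line length; semantically they remain property(...).equals(...) terms joined with or(). For readers unfamiliar with the pattern, here is a standalone sketch of the same fluent idea, a hypothetical mini-builder rather than Ambari's PredicateBuilder:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class MiniPredicateBuilderSketch {

      interface ResourcePredicate {
        boolean evaluate(Map<String, String> resource);
      }

      // A tiny fluent builder where every property(...).matches(...) pair adds
      // an OR'd equality term, loosely echoing PredicateBuilder's or() chains.
      static class MiniPredicateBuilder {
        private final List<String[]> terms = new ArrayList<String[]>();
        private String currentProperty;

        MiniPredicateBuilder property(String id) {
          currentProperty = id;
          return this;
        }

        MiniPredicateBuilder matches(String value) {
          terms.add(new String[] { currentProperty, value });
          return this;
        }

        ResourcePredicate toPredicate() {
          final List<String[]> snapshot = new ArrayList<String[]>(terms);
          return new ResourcePredicate() {
            @Override
            public boolean evaluate(Map<String, String> resource) {
              for (String[] term : snapshot) {
                if (term[1].equals(resource.get(term[0]))) {
                  return true; // any matching term satisfies the OR chain
                }
              }
              return false;
            }
          };
        }
      }

      public static void main(String[] args) {
        ResourcePredicate p = new MiniPredicateBuilder()
            .property("ServiceInfo/service_name").matches("HDFS")
            .property("ServiceInfo/service_name").matches("GANGLIA")
            .toPredicate();

        Map<String, String> ganglia = new HashMap<String, String>();
        ganglia.put("ServiceInfo/service_name", "GANGLIA");
        System.out.println(p.evaluate(ganglia)); // true

        Map<String, String> nagios = new HashMap<String, String>();
        nagios.put("ServiceInfo/service_name", "NAGIOS");
        System.out.println(p.evaluate(nagios)); // false
      }
    }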
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HttpPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HttpPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HttpPropertyProviderTest.java
index 2cc788a..154d125 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HttpPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HttpPropertyProviderTest.java
@@ -53,27 +53,25 @@ public class HttpPropertyProviderTest {
   private static final String PROPERTY_ID_CLUSTER_NAME = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
   private static final String PROPERTY_ID_HOST_NAME = PropertyHelper.getPropertyId("HostRoles", "host_name");
   private static final String PROPERTY_ID_COMPONENT_NAME = PropertyHelper.getPropertyId("HostRoles", "component_name");
-  
-  private static final String PROPERTY_ID_NAGIOS_ALERTS = PropertyHelper.getPropertyId("HostRoles", "nagios_alerts");
+
+  private static final String PROPERTY_ID_STALE_CONFIGS = PropertyHelper.getPropertyId(
+      "HostRoles", "stale_configs");
 
   private ComponentSSLConfiguration configuration;
 
   @Parameterized.Parameters
   public static Collection<Object[]> configs() {
-    ComponentSSLConfiguration configuration1 =
-        ComponentSSLConfigurationTest.getConfiguration("tspath", "tspass", "tstype", false, false);
+    ComponentSSLConfiguration configuration1 = ComponentSSLConfigurationTest.getConfiguration(
+        "tspath", "tspass", "tstype", false);
 
-    ComponentSSLConfiguration configuration2 =
-        ComponentSSLConfigurationTest.getConfiguration("tspath", "tspass", "tstype", true, false);
+    ComponentSSLConfiguration configuration2 = ComponentSSLConfigurationTest.getConfiguration(
+        "tspath", "tspass", "tstype", true);
 
-    ComponentSSLConfiguration configuration3 =
-        ComponentSSLConfigurationTest.getConfiguration("tspath", "tspass", "tstype", false, true);
+    ComponentSSLConfiguration configuration3 = ComponentSSLConfigurationTest.getConfiguration(
+        "tspath", "tspass", "tstype", false);
 
-    return Arrays.asList(new Object[][]{
-        {configuration1},
-        {configuration2},
-        {configuration3}
-    });
+    return Arrays.asList(new Object[][] { { configuration1 },
+        { configuration2 }, { configuration3 } });
   }
 
 
@@ -127,15 +125,13 @@ public class HttpPropertyProviderTest {
 
   @Test
   public void testReadGangliaServer() throws Exception {
-    
-    Resource resource = doPopulate("GANGLIA_SERVER", Collections.<String>emptySet(), new TestStreamProvider(false));
+    Resource resource = doPopulate("GANGLIA_SERVER",
+        Collections.<String> emptySet(), new TestStreamProvider(false));
 
     // !!! GANGLIA_SERVER has no current http lookup
-    Assert.assertNull("Expected null, was: " +
-      resource.getPropertyValue(PROPERTY_ID_NAGIOS_ALERTS),
-      resource.getPropertyValue(PROPERTY_ID_NAGIOS_ALERTS));
+    Assert.assertNull(resource.getPropertyValue(PROPERTY_ID_STALE_CONFIGS));
   }
-  
+
   private Resource doPopulate(String componentName,
       Set<String> requestProperties, StreamProvider streamProvider) throws Exception {
     Injector injector = createNiceMock(Injector.class);
@@ -151,9 +147,9 @@ public class HttpPropertyProviderTest {
     resource.setProperty(PROPERTY_ID_HOST_NAME, "ec2-54-234-33-50.compute-1.amazonaws.com");
     resource.setProperty(PROPERTY_ID_CLUSTER_NAME, "testCluster");
     resource.setProperty(PROPERTY_ID_COMPONENT_NAME, componentName);
-    
+
     Request request = PropertyHelper.getReadRequest(requestProperties);
-    
+
     propProvider.populateResources(Collections.singleton(resource), request, null);
 
     return resource;
@@ -167,18 +163,19 @@ public class HttpPropertyProviderTest {
     private TestStreamProvider(boolean throwErr) {
       throwError = throwErr;
     }
-    
+
     @Override
     public InputStream readFrom(String spec) throws IOException {
-      if (!isLastSpecUpdated)
+      if (!isLastSpecUpdated) {
         lastSpec = spec;
-      
+      }
+
       isLastSpecUpdated = false;
 
       if (throwError) {
         throw new IOException("Fake error");
       }
-      
+
       String responseStr = "{\"alerts\": [{\"Alert Body\": \"Body\"}],\"clusterInfo\": {\"haState\": \"ACTIVE\"},"
           + " \"hostcounts\": {\"up_hosts\":\"1\", \"down_hosts\":\"0\"}}";
         return new ByteArrayInputStream(responseStr.getBytes("UTF-8"));
@@ -195,5 +192,5 @@ public class HttpPropertyProviderTest {
       return readFrom(spec);
     }
   }
-  
+
 }

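TestStreamProvider above follows the usual stub pattern: remember the last spec requested, optionally fail, otherwise return canned JSON. A condensed, self-contained sketch of that pattern; the StreamProvider interface below is a hypothetical stand-in for Ambari's:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;

    public class StubStreamProviderSketch {

      // Hypothetical stand-in for Ambari's StreamProvider interface.
      interface StreamProvider {
        InputStream readFrom(String spec) throws IOException;
      }

      static class StubStreamProvider implements StreamProvider {
        private final boolean throwError;
        private String lastSpec; // records what the code under test requested

        StubStreamProvider(boolean throwError) {
          this.throwError = throwError;
        }

        @Override
        public InputStream readFrom(String spec) throws IOException {
          lastSpec = spec;
          if (throwError) {
            throw new IOException("Fake error");
          }
          String canned = "{\"alerts\": [{\"Alert Body\": \"Body\"}]}";
          return new ByteArrayInputStream(canned.getBytes(StandardCharsets.UTF_8));
        }

        String getLastSpec() {
          return lastSpec;
        }
      }

      public static void main(String[] args) throws IOException {
        StubStreamProvider provider = new StubStreamProvider(false);
        provider.readFrom("http://ganglia.example/cgi-bin/rrd.py?m=jvm.metrics.gcCount");
        System.out.println(provider.getLastSpec()); // assertions typically inspect this
      }
    }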
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java
index 91adc9b..d69d48e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java
@@ -18,15 +18,16 @@
 
 package org.apache.ambari.server.controller.internal;
 
+import java.util.HashSet;
+import java.util.Set;
+
 import junit.framework.Assert;
+
+import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.controller.spi.Request;
 import org.junit.Test;
 
-import java.util.HashSet;
-import java.util.Set;
-
 /**
  *
  */
@@ -55,7 +56,6 @@ public class RequestImplTest {
 
     //HostComponent resource properties
     Assert.assertFalse(validPropertyIds.contains("HostRoles/unsupported_property_id"));
-    Assert.assertTrue(validPropertyIds.contains("HostRoles/nagios_alerts"));
     Assert.assertTrue(validPropertyIds.contains("params/run_smoke_test"));
     Assert.assertTrue(validPropertyIds.contains("HostRoles/actual_configs"));
     Assert.assertTrue(validPropertyIds.contains("HostRoles/desired_stack_id"));

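RequestImplTest simply stops asserting HostRoles/nagios_alerts, since the property no longer exists. The slash-joined ids these assertions check come from PropertyHelper.getPropertyId(category, name); a minimal sketch of the assumed mapping (not Ambari's implementation):

    public class PropertyIdSketch {

      // Assumed behavior: join a category and a property name into a path-like
      // id, e.g. ("HostRoles", "stale_configs") -> "HostRoles/stale_configs".
      static String getPropertyId(String category, String name) {
        return (category == null || category.isEmpty()) ? name : category + "/" + name;
      }

      public static void main(String[] args) {
        System.out.println(getPropertyId("HostRoles", "stale_configs")); // HostRoles/stale_configs
        System.out.println(getPropertyId("params", "run_smoke_test"));   // params/run_smoke_test
      }
    }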
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/controller/nagios/NagiosPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/nagios/NagiosPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/nagios/NagiosPropertyProviderTest.java
deleted file mode 100644
index 8037451..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/nagios/NagiosPropertyProviderTest.java
+++ /dev/null
@@ -1,584 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.nagios;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.ganglia.TestStreamProvider;
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.TemporalInfo;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.state.Alert;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceComponent;
-import org.apache.ambari.server.state.ServiceComponentHost;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import java.util.Collection;
-import java.util.LinkedList;
-
-/**
- * Tests the Nagios property provider.
- */
-public class NagiosPropertyProviderTest {
-
-  private static final String HOST = "c6401.ambari.apache.org";
-
-  private GuiceModule module = null;
-  private Clusters clusters = null;
-  private Injector injector = null;
-
-  @Before
-  public void setup() throws Exception {
-    module = new GuiceModule();
-    injector = Guice.createInjector(module);
-    NagiosPropertyProvider.init(injector);
-    
-    clusters = injector.getInstance(Clusters.class);
-    Cluster cluster = createMock(Cluster.class);
-    
-    expect(cluster.getAlerts()).andReturn(Collections.<Alert>emptySet()).anyTimes();
-    
-    expect(clusters.getCluster("c1")).andReturn(cluster).anyTimes();
-    
-
-    Service nagiosService = createMock(Service.class);
-    expect(cluster.getService("NAGIOS")).andReturn(nagiosService).anyTimes();
-    
-    ServiceComponent nagiosServiceComponent = createMock(ServiceComponent.class);
-    expect(nagiosService.getServiceComponent("NAGIOS_SERVER")).andReturn(
-        nagiosServiceComponent).anyTimes();
-    
-    ServiceComponentHost nagiosScHost = createMock(ServiceComponentHost.class);
-    Map<String, ServiceComponentHost> map1 = new HashMap<String, ServiceComponentHost>();
-    map1.put(HOST, nagiosScHost);
-    expect(nagiosServiceComponent.getServiceComponentHosts()).andReturn(
-        map1).anyTimes();
-    
-    replay(clusters, cluster, nagiosService, nagiosServiceComponent);
-  }
-  
-  @Test
-  public void testNoNagiosService() throws Exception {
-    Cluster cluster = clusters.getCluster("c1");
-    reset(cluster); // simulate an error where NAGIOS is not part of the cluster
-    expect(cluster.getService("NAGIOS")).andThrow(new AmbariException("No Service"));
-    replay(cluster);
-    
-    TestStreamProvider streamProvider = new TestStreamProvider("nagios_alerts.txt");
-
-    NagiosPropertyProvider npp = new NagiosPropertyProvider(Resource.Type.Service,
-        streamProvider,
-        "ServiceInfo/cluster_name",
-        "ServiceInfo/service_name");
-    npp.forceReset();
-    
-    Resource resource = new ResourceImpl(Resource.Type.Service);
-    resource.setProperty("ServiceInfo/cluster_name", "c1");
-    resource.setProperty("ServiceInfo/service_name", "HBASE");
-    
-    // request with an empty set should get all supported properties
-    Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet(), new HashMap<String, TemporalInfo>());
-
-    Set<Resource> set = npp.populateResources(Collections.singleton(resource), request, null);
-    Assert.assertEquals(1, set.size());
-    
-    Resource res = set.iterator().next();
-    
-    Map<String, Map<String, Object>> values = res.getPropertiesMap();
-    
-    Assert.assertFalse("Expected no alerts", values.containsKey("legacy_alerts"));
-  }
-
-
-  @Test
-  public void testClusterDoesNotExistNPE() throws Exception {
-    TestStreamProvider streamProvider = new TestStreamProvider("nagios_alerts.txt");
-
-    NagiosPropertyProvider npp = new NagiosPropertyProvider(Resource.Type.Service,
-        streamProvider,
-        "ServiceInfo/cluster_name",
-        "ServiceInfo/service_name");
-    npp.forceReset();
-
-    Resource resource = new ResourceImpl(Resource.Type.Service);
-    resource.setProperty("ServiceInfo/cluster_name", null);
-    resource.setProperty("ServiceInfo/service_name", "HBASE");
-
-    // request with an empty set should get all supported properties
-    Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet(),
-            new HashMap<String, TemporalInfo>());
-
-    Set<Resource> set = npp.populateResources(Collections.singleton(resource), request, null);
-    Assert.assertEquals(1, set.size());
-
-  }
-
-  @Test
-  public void testNagiosClusterAlerts() throws Exception {
-
-    TestStreamProvider streamProvider = new TestStreamProvider("nagios_alerts.txt");
-
-    NagiosPropertyProvider npp = new NagiosPropertyProvider(Resource.Type.Cluster,
-      streamProvider,
-      "Clusters/cluster_name",
-      "Clusters/version");
-    npp.forceReset();
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty("Clusters/cluster_name", "c1");
-    resource.setProperty("Clusters/version", "HDP-2.0.6");
-
-    // request with an empty set should get all supported properties
-    Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet(), new HashMap<String, TemporalInfo>());
-
-    Set<Resource> set = npp.populateResources(Collections.singleton(resource), request, null);
-    Assert.assertEquals(1, set.size());
-
-    Resource res = set.iterator().next();
-
-    Map<String, Map<String, Object>> values = res.getPropertiesMap();
-
-    Assert.assertTrue(values.containsKey("legacy_alerts/summary"));
-
-    Map<String, Object> summary = values.get("legacy_alerts/summary");
-    Assert.assertEquals(4L, summary.size());
-    Assert.assertTrue(summary.containsKey("OK"));
-    Assert.assertTrue(summary.containsKey("WARNING"));
-    Assert.assertTrue(summary.containsKey("CRITICAL"));
-    Assert.assertTrue(summary.containsKey("PASSIVE"));
-    Assert.assertFalse(summary.containsKey("detail"));
-
-    // 4 hosts in total, none of them alert-free
-    Assert.assertTrue(summary.get("OK").equals(Integer.valueOf(0)));
-    Assert.assertTrue(summary.get("WARNING").equals(Integer.valueOf(1)));
-    Assert.assertTrue(summary.get("CRITICAL").equals(Integer.valueOf(2)));
-    Assert.assertTrue(summary.get("PASSIVE").equals(Integer.valueOf(1)));
-
-  }
-
-  @Test
-  public void testNoNagiosServerComponent() throws Exception {
-
-    Cluster cluster = clusters.getCluster("c1");
-    reset(cluster);
-
-    Service nagiosService = createMock(Service.class);
-    expect(cluster.getService("NAGIOS")).andReturn(nagiosService);
-
-    ServiceComponent nagiosServiceComponent = createMock(ServiceComponent.class);
-    expect(nagiosService.getServiceComponent("NAGIOS_SERVER")).andThrow(new AmbariException("No Component"));
-
-    replay(cluster, nagiosService);
-
-    TestStreamProvider streamProvider = new TestStreamProvider("nagios_alerts.txt");
-
-    NagiosPropertyProvider npp = new NagiosPropertyProvider(Resource.Type.Service,
-        streamProvider,
-        "ServiceInfo/cluster_name",
-        "ServiceInfo/service_name");
-    npp.forceReset();
-    
-    Resource resource = new ResourceImpl(Resource.Type.Service);
-    resource.setProperty("ServiceInfo/cluster_name", "c1");
-    resource.setProperty("ServiceInfo/service_name", "HBASE");
-    
-    // request with an empty set should get all supported properties
-    Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet(), new HashMap<String, TemporalInfo>());
-
-    Set<Resource> set = npp.populateResources(Collections.singleton(resource), request, null);
-    Assert.assertEquals(1, set.size());
-    
-    Resource res = set.iterator().next();
-    
-    Map<String, Map<String, Object>> values = res.getPropertiesMap();
-    
-    Assert.assertFalse("Expected no alerts", values.containsKey("legacy_alerts"));
-  }
-  
-  @Test
-  public void testNagiosServiceAlerts() throws Exception {
-    // ensure NAGIOS_IGNORE_FOR_SERVICES_KEY is unset; testNagiosServiceAlertsAddIgnore may have set it
-    module.properties.remove(Configuration.NAGIOS_IGNORE_FOR_SERVICES_KEY);
-
-    TestStreamProvider streamProvider = new TestStreamProvider("nagios_alerts.txt");
-
-    NagiosPropertyProvider npp = new NagiosPropertyProvider(Resource.Type.Service,
-        streamProvider,
-        "ServiceInfo/cluster_name",
-        "ServiceInfo/service_name");
-    npp.forceReset();
-    NagiosPropertyProvider.init(injector);
-    
-    Resource resource = new ResourceImpl(Resource.Type.Service);
-    resource.setProperty("ServiceInfo/cluster_name", "c1");
-    resource.setProperty("ServiceInfo/service_name", "HBASE");
-    
-    // request with an empty set should get all supported properties
-    Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet(), new HashMap<String, TemporalInfo>());
-
-    Set<Resource> set = npp.populateResources(Collections.singleton(resource), request, null);
-    Assert.assertEquals(1, set.size());
-    
-    Resource res = set.iterator().next();
-    
-    Map<String, Map<String, Object>> values = res.getPropertiesMap();
-    
-    Assert.assertTrue(values.containsKey("legacy_alerts"));
-    Assert.assertTrue(values.containsKey("legacy_alerts/summary"));
-    Assert.assertTrue(values.get("legacy_alerts").containsKey("detail"));
-    Assert.assertTrue(List.class.isInstance(values.get("legacy_alerts").get("detail")));
-    
-    List<?> list = (List<?>) values.get("legacy_alerts").get("detail");
-    Assert.assertEquals(Integer.valueOf(3), Integer.valueOf(list.size()));
-    for (Object o : list) {
-      Assert.assertTrue(Map.class.isInstance(o));
-      Map<?, ?> map = (Map<?, ?>) o;
-      Assert.assertTrue(map.containsKey("service_name"));
-      String serviceName = map.get("service_name").toString();
-      Assert.assertTrue("expected HBASE", serviceName.equals("HBASE"));
-    }
-    
-    Map<String, Object> summary = values.get("legacy_alerts/summary");
-    Assert.assertTrue(summary.containsKey("OK"));
-    Assert.assertTrue(summary.containsKey("WARNING"));
-    Assert.assertTrue(summary.containsKey("CRITICAL"));
-    Assert.assertTrue(summary.containsKey("PASSIVE"));
-    
-    Assert.assertTrue(summary.get("OK").equals(Integer.valueOf(1)));
-    Assert.assertTrue(summary.get("WARNING").equals(Integer.valueOf(0)));
-    Assert.assertTrue(summary.get("CRITICAL").equals(Integer.valueOf(2)));
-  }  
-  
-
-  @Test
-  public void testNagiosHostAlerts() throws Exception {    
-    TestStreamProvider streamProvider = new TestStreamProvider("nagios_alerts.txt");
-
-    NagiosPropertyProvider npp = new NagiosPropertyProvider(Resource.Type.Host,
-        streamProvider,
-        "Hosts/cluster_name",
-        "Hosts/host_name");
-    npp.forceReset();
-    
-    Resource resource = new ResourceImpl(Resource.Type.Service);
-    resource.setProperty("Hosts/cluster_name", "c1");
-    resource.setProperty("Hosts/host_name", "c6403.ambari.apache.org");
-    
-    // request with an empty set should get all supported properties
-    Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet(), new HashMap<String, TemporalInfo>());
-
-    Set<Resource> set = npp.populateResources(Collections.singleton(resource), request, null);
-    Assert.assertEquals(1, set.size());
-    
-    Resource res = set.iterator().next();
-    
-    Map<String, Map<String, Object>> values = res.getPropertiesMap();
-    
-    Assert.assertTrue(values.containsKey("legacy_alerts"));
-    Assert.assertTrue(values.containsKey("legacy_alerts/summary"));
-    Assert.assertTrue(values.get("legacy_alerts").containsKey("detail"));
-    Assert.assertTrue(List.class.isInstance(values.get("legacy_alerts").get("detail")));
-    
-    List<?> list = (List<?>) values.get("legacy_alerts").get("detail");
-    Assert.assertTrue(7 == list.size());
-    for (Object o : list) {
-      Assert.assertTrue(Map.class.isInstance(o));
-      Map<?, ?> map = (Map<?, ?>) o;
-      Assert.assertTrue(map.containsKey("host_name"));
-      String host = map.get("host_name").toString();
-      Assert.assertTrue("expected c6403.ambari.apache.org", host.equals("c6403.ambari.apache.org"));
-    }
-    
-    Map<String, Object> summary = values.get("legacy_alerts/summary");
-    Assert.assertTrue(summary.containsKey("OK"));
-    Assert.assertTrue(summary.containsKey("WARNING"));
-    Assert.assertTrue(summary.containsKey("CRITICAL"));
-    Assert.assertTrue(summary.containsKey("PASSIVE"));
-    
-    Assert.assertTrue(summary.get("OK").equals(Integer.valueOf(6)));
-    Assert.assertTrue(summary.get("WARNING").equals(Integer.valueOf(0)));
-    Assert.assertTrue(summary.get("CRITICAL").equals(Integer.valueOf(1)));
-  }
-  
-  @Test
-  public void testNagiosHostAlertsWithIgnore() throws Exception {
-    
-    TestStreamProvider streamProvider = new TestStreamProvider("nagios_alerts.txt");
-
-    NagiosPropertyProvider npp = new NagiosPropertyProvider(Resource.Type.Host,
-        streamProvider,
-        "Hosts/cluster_name",
-        "Hosts/host_name");
-    npp.forceReset();
-    
-    Resource resource = new ResourceImpl(Resource.Type.Service);
-    resource.setProperty("Hosts/cluster_name", "c1");
-    resource.setProperty("Hosts/host_name", "c6401.ambari.apache.org");
-    
-    // request with an empty set should get all supported properties
-    Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet(), new HashMap<String, TemporalInfo>());
-
-    Set<Resource> set = npp.populateResources(Collections.singleton(resource), request, null);
-    Assert.assertEquals(1, set.size());
-    
-    Resource res = set.iterator().next();
-    
-    Map<String, Map<String, Object>> values = res.getPropertiesMap();
-    
-    Assert.assertTrue(values.containsKey("legacy_alerts"));
-    Assert.assertTrue(values.containsKey("legacy_alerts/summary"));
-    Assert.assertTrue(values.get("legacy_alerts").containsKey("detail"));
-    Assert.assertTrue(List.class.isInstance(values.get("legacy_alerts").get("detail")));
-    
-    List<?> list = (List<?>) values.get("legacy_alerts").get("detail");
-    Assert.assertEquals(Integer.valueOf(16), Integer.valueOf(list.size()));
-    for (Object o : list) {
-      Assert.assertTrue(Map.class.isInstance(o));
-      Map<?, ?> map = (Map<?, ?>) o;
-      Assert.assertTrue(map.containsKey("host_name"));
-      String host = map.get("host_name").toString();
-      Assert.assertEquals("c6401.ambari.apache.org", host);
-    }
-    
-    Map<String, Object> summary = values.get("legacy_alerts/summary");
-    Assert.assertTrue(summary.containsKey("OK"));
-    Assert.assertTrue(summary.containsKey("WARNING"));
-    Assert.assertTrue(summary.containsKey("CRITICAL"));
-    Assert.assertTrue(summary.containsKey("PASSIVE"));
-    
-    Assert.assertEquals(summary.get("OK"), Integer.valueOf(14));
-    Assert.assertEquals(summary.get("WARNING"), Integer.valueOf(0));
-    Assert.assertEquals(summary.get("CRITICAL"), Integer.valueOf(1));
-    Assert.assertEquals(Integer.valueOf(1), summary.get("PASSIVE"));
-  }  
-  
-  @Test
-  public void testNagiosServiceAlertsAddIgnore() throws Exception {
-    module.properties.setProperty(Configuration.NAGIOS_IGNORE_FOR_SERVICES_KEY,
-        "HBase Master process on c6401.ambari.apache.org");
-    
-    TestStreamProvider streamProvider = new TestStreamProvider("nagios_alerts.txt");
-
-    NagiosPropertyProvider npp = new NagiosPropertyProvider(Resource.Type.Service,
-        streamProvider,
-        "ServiceInfo/cluster_name",
-        "ServiceInfo/service_name");
-    npp.forceReset();
-    NagiosPropertyProvider.init(injector);
-    
-    Resource resource = new ResourceImpl(Resource.Type.Service);
-    resource.setProperty("ServiceInfo/cluster_name", "c1");
-    resource.setProperty("ServiceInfo/service_name", "HBASE");
-    
-    // request with an empty set should get all supported properties
-    Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet(), new HashMap<String, TemporalInfo>());
-
-    Set<Resource> set = npp.populateResources(Collections.singleton(resource), request, null);
-    Assert.assertEquals(1, set.size());
-    
-    Resource res = set.iterator().next();
-    
-    Map<String, Map<String, Object>> values = res.getPropertiesMap();
-    
-    Assert.assertTrue(values.containsKey("legacy_alerts"));
-    Assert.assertTrue(values.containsKey("legacy_alerts/summary"));
-    Assert.assertTrue(values.get("legacy_alerts").containsKey("detail"));
-    Assert.assertTrue(List.class.isInstance(values.get("legacy_alerts").get("detail")));
-    
-    List<?> list = (List<?>) values.get("legacy_alerts").get("detail");
-    // removed an additional one
-    Assert.assertEquals(Integer.valueOf(2), Integer.valueOf(list.size()));
-    for (Object o : list) {
-      Assert.assertTrue(Map.class.isInstance(o));
-      Map<?, ?> map = (Map<?, ?>) o;
-      Assert.assertTrue(map.containsKey("service_name"));
-      String serviceName = map.get("service_name").toString();
-      Assert.assertTrue("expected HBASE", serviceName.equals("HBASE"));
-    }
-    
-    Map<String, Object> summary = values.get("legacy_alerts/summary");
-    Assert.assertTrue(summary.containsKey("OK"));
-    Assert.assertTrue(summary.containsKey("WARNING"));
-    Assert.assertTrue(summary.containsKey("CRITICAL"));
-    Assert.assertTrue(summary.containsKey("PASSIVE"));
-    
-    Assert.assertTrue(summary.get("OK").equals(Integer.valueOf(1)));
-    Assert.assertTrue(summary.get("WARNING").equals(Integer.valueOf(0)));
-    Assert.assertTrue(summary.get("CRITICAL").equals(Integer.valueOf(1)));
-  }
-
-  @Test
-  public void testNagiosServiceAlertsWithPassive() throws Exception {
-    Injector inj = Guice.createInjector(new GuiceModule());
-    
-    Clusters clusters = inj.getInstance(Clusters.class);
-    Cluster cluster = createMock(Cluster.class);
-    expect(cluster.getAlerts()).andReturn(Collections.<Alert>emptySet()).anyTimes();
-    expect(clusters.getCluster("c1")).andReturn(cluster);
-
-    Service nagiosService = createMock(Service.class);
-    expect(cluster.getService("NAGIOS")).andReturn(nagiosService);
-    
-    ServiceComponent nagiosServiceComponent = createMock(ServiceComponent.class);
-    expect(nagiosService.getServiceComponent("NAGIOS_SERVER")).andReturn(nagiosServiceComponent);
-    
-    ServiceComponentHost nagiosScHost = createMock(ServiceComponentHost.class);
-    Map<String, ServiceComponentHost> map1 = new HashMap<String, ServiceComponentHost>();
-    map1.put(HOST, nagiosScHost);
-    expect(nagiosServiceComponent.getServiceComponentHosts()).andReturn(map1);
-    
-    replay(clusters, cluster, nagiosService, nagiosServiceComponent);
-
-    
-    TestStreamProvider streamProvider = new TestStreamProvider("nagios_alerts.txt");
-
-    NagiosPropertyProvider npp = new NagiosPropertyProvider(Resource.Type.Service,
-        streamProvider,
-        "ServiceInfo/cluster_name",
-        "ServiceInfo/service_name");
-    npp.forceReset();
-    NagiosPropertyProvider.init(inj);
-    
-    Resource resource = new ResourceImpl(Resource.Type.Service);
-    resource.setProperty("ServiceInfo/cluster_name", "c1");
-    resource.setProperty("ServiceInfo/service_name", "GANGLIA");
-    
-    // request with an empty set should get all supported properties
-    Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet(), new HashMap<String, TemporalInfo>());
-
-    Set<Resource> set = npp.populateResources(Collections.singleton(resource), request, null);
-    Assert.assertEquals(1, set.size());
-    
-    Resource res = set.iterator().next();
-    
-    Map<String, Map<String, Object>> values = res.getPropertiesMap();
-    
-    Assert.assertTrue(values.containsKey("legacy_alerts"));
-    Assert.assertTrue(values.containsKey("legacy_alerts/summary"));
-    Assert.assertTrue(values.get("legacy_alerts").containsKey("detail"));
-    Assert.assertTrue(List.class.isInstance(values.get("legacy_alerts").get("detail")));
-    
-    List<?> list = (List<?>) values.get("legacy_alerts").get("detail");
-    // removed an additional one
-    Assert.assertEquals(Integer.valueOf(4), Integer.valueOf(list.size()));
-    for (Object o : list) {
-      Assert.assertTrue(Map.class.isInstance(o));
-      Map<?, ?> map = (Map<?, ?>) o;
-      Assert.assertTrue(map.containsKey("service_name"));
-      String serviceName = map.get("service_name").toString();
-      Assert.assertEquals(serviceName, "GANGLIA");
-    }
-    
-    Map<String, Object> summary = values.get("legacy_alerts/summary");
-    Assert.assertTrue(summary.containsKey("OK"));
-    Assert.assertTrue(summary.containsKey("WARNING"));
-    Assert.assertTrue(summary.containsKey("CRITICAL"));
-    Assert.assertTrue(summary.containsKey("PASSIVE"));
-    
-    Assert.assertEquals(Integer.valueOf(3), summary.get("OK"));
-    Assert.assertEquals(Integer.valueOf(0), summary.get("WARNING"));
-    Assert.assertEquals(Integer.valueOf(0), summary.get("CRITICAL"));
-    Assert.assertEquals(Integer.valueOf(1), summary.get("PASSIVE"));
-  }
-  
-  @Test
-  public void testNagiosHostAlertsSubstringPassiveMarker() throws Exception {
-    
-    TestStreamProvider streamProvider = new TestStreamProvider("nagios_alerts.txt");
-
-    NagiosPropertyProvider npp = new NagiosPropertyProvider(Resource.Type.Host,
-        streamProvider,
-        "Hosts/cluster_name",
-        "Hosts/host_name");
-    npp.forceReset();
-    
-    Resource resource = new ResourceImpl(Resource.Type.Host);
-    resource.setProperty("Hosts/cluster_name", "c1");
-    resource.setProperty("Hosts/host_name", "c6404.ambari.apache.org");
-    
-    // request with an empty set should get all supported properties
-    Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet(), new HashMap<String, TemporalInfo>());
-
-    Set<Resource> set = npp.populateResources(Collections.singleton(resource), request, null);
-    Assert.assertEquals(1, set.size());
-    
-    Resource res = set.iterator().next();
-    
-    Map<String, Map<String, Object>> values = res.getPropertiesMap();
-    
-    Assert.assertTrue(values.containsKey("legacy_alerts"));
-    Assert.assertTrue(values.containsKey("legacy_alerts/summary"));
-    Assert.assertTrue(values.get("legacy_alerts").containsKey("detail"));
-    Assert.assertTrue(List.class.isInstance(values.get("legacy_alerts").get("detail")));
-    
-    List<?> list = (List<?>) values.get("legacy_alerts").get("detail");
-    Assert.assertEquals(Integer.valueOf(1), Integer.valueOf(list.size()));
-    for (Object o : list) {
-      Assert.assertTrue(Map.class.isInstance(o));
-      Map<?, ?> map = (Map<?, ?>) o;
-      Assert.assertTrue(map.containsKey("host_name"));
-      String host = map.get("host_name").toString();
-      Assert.assertEquals("c6404.ambari.apache.org", host);
-    }
-    
-    Map<String, Object> summary = values.get("legacy_alerts/summary");
-    Assert.assertTrue(summary.containsKey("OK"));
-    Assert.assertTrue(summary.containsKey("WARNING"));
-    Assert.assertTrue(summary.containsKey("CRITICAL"));
-    Assert.assertTrue(summary.containsKey("PASSIVE"));
-    
-    Assert.assertEquals(Integer.valueOf(0), summary.get("OK"));
-    Assert.assertEquals(Integer.valueOf(0), summary.get("WARNING"));
-    Assert.assertEquals(Integer.valueOf(0), summary.get("CRITICAL"));
-    Assert.assertEquals(Integer.valueOf(1), summary.get("PASSIVE"));
-  }   
-  
-  private static class GuiceModule implements Module {
-
-    private Properties properties = new Properties();
-    
-    @Override
-    public void configure(Binder binder) {
-     binder.bind(Clusters.class).toInstance(createMock(Clusters.class));
-     binder.bind(Configuration.class).toInstance(new Configuration(properties));
-    }
-  }
-
-}

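For reference, the deleted NagiosPropertyProviderTest relied on one recurring EasyMock pattern: mock the Clusters/Cluster/Service chain, then reset() a single mock so the same call fails instead. A trimmed sketch of that pattern; the Cluster and Service interfaces below are hypothetical stand-ins for Ambari's:

    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.reset;

    public class MockChainSketch {

      // Hypothetical stand-ins for the Ambari interfaces the test mocked.
      interface Service { String getName(); }
      interface Cluster { Service getService(String name) throws Exception; }

      public static void main(String[] args) throws Exception {
        Cluster cluster = createMock(Cluster.class);
        Service nagios = createMock(Service.class);

        // Happy path: the cluster resolves the NAGIOS service.
        expect(cluster.getService("NAGIOS")).andReturn(nagios).anyTimes();
        replay(cluster, nagios);
        System.out.println(cluster.getService("NAGIOS") == nagios); // true

        // Failure path: reset one mock and make the same call throw instead,
        // as testNoNagiosService() did above.
        reset(cluster);
        expect(cluster.getService("NAGIOS")).andThrow(new Exception("No Service"));
        replay(cluster);
        try {
          cluster.getService("NAGIOS");
        } catch (Exception expected) {
          System.out.println(expected.getMessage()); // No Service
        }
      }
    }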

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.cfg.j2
deleted file mode 100644
index bcff8ac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.cfg.j2
+++ /dev/null
@@ -1,1365 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-##############################################################################
-#
-# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
-#
-# Read the documentation for more information on this configuration
-# file.  I've provided some comments here, but things may not be so
-# clear without further explanation.
-#
-# Last Modified: 12-14-2008
-#
-##############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-# LOG FILE
-# This is the main log file where service and host events are logged
-# for historical purposes.  This should be the first option specified 
-# in the config file!!!
-
-log_file=/var/log/nagios/nagios.log
-
-
-# OBJECT CONFIGURATION FILE(S)
-# These are the object configuration files in which you define hosts,
-# host groups, contacts, contact groups, services, etc.
-# You can split your object definitions across several config files
-# if you wish (as shown below), or keep them all in a single config file.
-
-{% for cfg_file in cfg_files %}
-cfg_file={{cfg_file}}
-{% endfor %}
-
-# Definitions for monitoring the local (Linux) host
-#cfg_file={{conf_dir}}/objects/localhost.cfg
-
-# Definitions for monitoring a Windows machine
-#cfg_file={{conf_dir}}/objects/windows.cfg
-
-# Definitions for monitoring a router/switch
-#cfg_file={{conf_dir}}/objects/switch.cfg
-
-# Definitions for monitoring a network printer
-#cfg_file={{conf_dir}}/objects/printer.cfg
-
-# Definitions for hadoop servers
-cfg_file={{nagios_host_cfg}}
-cfg_file={{nagios_hostgroup_cfg}}
-cfg_file={{nagios_servicegroup_cfg}}
-cfg_file={{nagios_service_cfg}}
-cfg_file={{nagios_command_cfg}}
-
-
-# You can also tell Nagios to process all config files (with a .cfg
-# extension) in a particular directory by using the cfg_dir
-# directive as shown below:
-
-#cfg_dir={{conf_dir}}/servers
-#cfg_dir={{conf_dir}}/printers
-#cfg_dir={{conf_dir}}/switches
-#cfg_dir={{conf_dir}}/routers
-
-
-
-
-# OBJECT CACHE FILE
-# This option determines where object definitions are cached when
-# Nagios starts/restarts.  The CGIs read object definitions from 
-# this cache file (rather than looking at the object config files
-# directly) in order to prevent inconsistencies that can occur
-# when the config files are modified after Nagios starts.
-
-object_cache_file=/var/nagios/objects.cache
-
-
-
-# PRE-CACHED OBJECT FILE
-# This option determines the location of the precached object file.
-# If you run Nagios with the -p command line option, it will preprocess
-# your object configuration file(s) and write the cached config to this
-# file.  You can then start Nagios with the -u option to have it read
-# object definitions from this precached file, rather than the standard
-# object configuration files (see the cfg_file and cfg_dir options above).
-# Using a precached object file can speed up the time needed to (re)start 
-# the Nagios process if you've got a large and/or complex configuration.
-# Read the documentation section on optimizing Nagios to find out more
-# about how this feature works.
-
-precached_object_file=/var/nagios/objects.precache
-
-
-
-# RESOURCE FILE
-# This is an optional resource file that contains $USERx$ macro
-# definitions. Multiple resource files can be specified by using
-# multiple resource_file definitions.  The CGIs will not attempt to
-# read the contents of resource files, so information that is
-# considered to be sensitive (usernames, passwords, etc) can be
-# defined as macros in this file and restrictive permissions (600)
-# can be placed on this file.
-
-resource_file={{nagios_resource_cfg}}
-
-
-
-# STATUS FILE
-# This is where the current status of all monitored services and
-# hosts is stored.  Its contents are read and processed by the CGIs.
-# The contents of the status file are deleted every time Nagios
-# restarts.
-
-status_file=/var/nagios/status.dat
-
-
-
-# STATUS FILE UPDATE INTERVAL
-# This option determines the frequency (in seconds) that
-# Nagios will periodically dump program, host, and 
-# service status data.
-
-status_update_interval=10
-
-
-
-# NAGIOS USER
-# This determines the effective user that Nagios should run as.  
-# You can either supply a username or a UID.
-
-nagios_user={{nagios_user}}
-
-
-
-# NAGIOS GROUP
-# This determines the effective group that Nagios should run as.  
-# You can either supply a group name or a GID.
-
-nagios_group={{nagios_group}}
-
-
-
-# EXTERNAL COMMAND OPTION
-# This option allows you to specify whether or not Nagios should check
-# for external commands (in the command file defined below).  By default
-# Nagios will *not* check for external commands, just to be on the
-# cautious side.  If you want to be able to use the CGI command interface
-# you will have to enable this.
-# Values: 0 = disable commands, 1 = enable commands
-
-check_external_commands=1
-
-
-
-# EXTERNAL COMMAND CHECK INTERVAL
-# This is the interval at which Nagios should check for external commands.
-# This value works off the interval_length you specify later.  If you leave
-# that at its default value of 60 (seconds), a value of 1 here will cause
-# Nagios to check for external commands every minute.  If you specify a
-# number followed by an "s" (e.g. 15s), this will be interpreted to mean
-# actual seconds rather than a multiple of the interval_length variable.
-# Note: In addition to reading the external command file at regularly 
-# scheduled intervals, Nagios will also check for external commands after
-# event handlers are executed.
-# NOTE: Setting this value to -1 causes Nagios to check the external
-# command file as often as possible.
-
-#command_check_interval=15s
-command_check_interval=-1
-
-
-
-# EXTERNAL COMMAND FILE
-# This is the file that Nagios checks for external command requests.
-# It is also where the command CGI will write commands that are submitted
-# by users, so it must be writeable by the user that the web server
-# is running as (usually 'nobody').  Permissions should be set at the 
-# directory level instead of on the file, as the file is deleted every
-# time its contents are processed.
-
-command_file=/var/nagios/rw/nagios.cmd
-
-
-
-# EXTERNAL COMMAND BUFFER SLOTS
-# This setting is used to tweak the number of items or "slots" that
-# the Nagios daemon should allocate to the buffer that holds incoming 
-# external commands before they are processed.  As external commands 
-# are processed by the daemon, they are removed from the buffer.  
-
-external_command_buffer_slots=4096
-
-
-
-# LOCK FILE
-# This is the lockfile that Nagios will use to store its PID number
-# in when it is running in daemon mode.
-
-lock_file={{nagios_pid_file}}
-
-
-
-# TEMP FILE
-# This is a temporary file that is used as scratch space when Nagios
-# updates the status log, cleans the comment file, etc.  This file
-# is created, used, and deleted throughout the time that Nagios is
-# running.
-
-temp_file=/var/nagios/nagios.tmp
-
-
-
-# TEMP PATH
-# This is the path where Nagios can create temp files for service and
-# host check results, etc.
-
-temp_path=/tmp
-
-
-
-# EVENT BROKER OPTIONS
-# Controls what (if any) data gets sent to the event broker.
-# Values:  0      = Broker nothing
-#         -1      = Broker everything
-#         <other> = See documentation
-
-event_broker_options=-1
-
-
-
-# EVENT BROKER MODULE(S)
-# This directive is used to specify an event broker module that should
-# be loaded by Nagios at startup.  Use multiple directives if you want
-# to load more than one module.  Arguments that should be passed to
-# the module at startup are separated from the module path by a space.
-#
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-#
-# Do NOT overwrite modules while they are being used by Nagios or Nagios
-# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
-# in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
-#
-# The correct/safe way of updating a module is by using one of these methods:
-#    1. Shutdown Nagios, replace the module file, restart Nagios
-#    2. Delete the original module file, move the new module file into place, restart Nagios
-#
-# Example:
-#
-#   broker_module=<modulepath> [moduleargs]
-
-#broker_module=/somewhere/module1.o
-#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
-
-
-
-# LOG ROTATION METHOD
-# This is the log rotation method that Nagios should use to rotate
-# the main log file. Values are as follows..
-#	n	= None - don't rotate the log
-#	h	= Hourly rotation (top of the hour)
-#	d	= Daily rotation (midnight every day)
-#	w	= Weekly rotation (midnight on Saturday evening)
-#	m	= Monthly rotation (midnight last day of month)
-
-log_rotation_method=d
-
-
-
-# LOG ARCHIVE PATH
-# This is the directory where archived (rotated) log files should be 
-# placed (assuming you've chosen to do log rotation).
-
-log_archive_path=/var/log/nagios/archives
-
-
-
-# LOGGING OPTIONS
-# If you want messages logged to the syslog facility, as well as the
-# Nagios log file set this option to 1.  If not, set it to 0.
-
-use_syslog=1
-
-
-
-# NOTIFICATION LOGGING OPTION
-# If you don't want notifications to be logged, set this value to 0.
-# If notifications should be logged, set the value to 1.
-
-log_notifications=1
-
-
-
-# SERVICE RETRY LOGGING OPTION
-# If you don't want service check retries to be logged, set this value
-# to 0.  If retries should be logged, set the value to 1.
-
-log_service_retries=1
-
-
-
-# HOST RETRY LOGGING OPTION
-# If you don't want host check retries to be logged, set this value to
-# 0.  If retries should be logged, set the value to 1.
-
-log_host_retries=1
-
-
-
-# EVENT HANDLER LOGGING OPTION
-# If you don't want host and service event handlers to be logged, set
-# this value to 0.  If event handlers should be logged, set the value
-# to 1.
-
-log_event_handlers=1
-
-
-
-# INITIAL STATES LOGGING OPTION
-# If you want Nagios to log all initial host and service states to
-# the main log file (the first time the service or host is checked)
-# you can enable this option by setting this value to 1.  If you
-# are not using an external application that does long term state
-# statistics reporting, you do not need to enable this option.  In
-# this case, set the value to 0.
-
-log_initial_states=0
-
-
-
-# EXTERNAL COMMANDS LOGGING OPTION
-# If you don't want Nagios to log external commands, set this value
-# to 0.  If external commands should be logged, set this value to 1.
-# Note: This option does not include logging of passive service
-# checks - see the option below for controlling whether or not
-# passive checks are logged.
-
-log_external_commands=1
-
-
-
-# PASSIVE CHECKS LOGGING OPTION
-# If you don't want Nagios to log passive host and service checks, set
-# this value to 0.  If passive checks should be logged, set
-# this value to 1.
-
-log_passive_checks=1
-
-
-
-# GLOBAL HOST AND SERVICE EVENT HANDLERS
-# These options allow you to specify a host and service event handler
-# command that is to be run for every host or service state change.
-# The global event handler is executed immediately prior to the event
-# handler that you have optionally specified in each host or
-# service definition. The command argument is the short name of a
-# command definition that you define in your host configuration file.
-# Read the HTML docs for more information.
-
-#global_host_event_handler=somecommand
-#global_service_event_handler=somecommand
-
-
-
-# SERVICE INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" service checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all service checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!  This is not a
-# good thing for production, but is useful when testing the
-# parallelization functionality.
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-service_inter_check_delay_method=s
-
-
-
-# MAXIMUM SERVICE CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all services should
-# be completed.  Default is 30 minutes.
-
-max_service_check_spread=30
-
-
-
-# SERVICE CHECK INTERLEAVE FACTOR
-# This variable determines how service checks are interleaved.
-# Interleaving the service checks allows for a more even
-# distribution of service checks and reduced load on remote
-# hosts.  Setting this value to 1 is equivalent to how versions
-# of Nagios previous to 0.0.5 did service checks.  Set this
-# value to s (smart) for automatic calculation of the interleave
-# factor unless you have a specific reason to change it.
-#       s       = Use "smart" interleave factor calculation
-#       x       = Use an interleave factor of x, where x is a
-#                 number greater than or equal to 1.
-
-service_interleave_factor=s
-
-
-
-# HOST INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" host checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all host checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-host_inter_check_delay_method=s
-
-
-
-# MAXIMUM HOST CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all hosts should
-# be completed.  Default is 30 minutes.
-
-max_host_check_spread=30
-
-
-
-# MAXIMUM CONCURRENT SERVICE CHECKS
-# This option allows you to specify the maximum number of 
-# service checks that can be run in parallel at any given time.
-# Specifying a value of 1 for this variable essentially prevents
-# any service checks from being parallelized.  A value of 0
-# will not restrict the number of concurrent checks that are
-# being executed.
-
-max_concurrent_checks=0
-
-
-
-# HOST AND SERVICE CHECK REAPER FREQUENCY
-# This is the frequency (in seconds!) that Nagios will process
-# the results of host and service checks.
-
-check_result_reaper_frequency=10
-
-
-
-
-# MAX CHECK RESULT REAPER TIME
-# This is the max amount of time (in seconds) that a single
-# check result reaper event will be allowed to run before 
-# returning control back to Nagios so it can perform other
-# duties.
-
-max_check_result_reaper_time=30
-
-
-
-
-# CHECK RESULT PATH
-# This is the directory where Nagios stores the results of host and
-# service checks that have not yet been processed.
-#
-# Note: Make sure that only one instance of Nagios has access
-# to this directory!  
-
-check_result_path=/var/nagios/spool/checkresults
-
-
-
-
-# MAX CHECK RESULT FILE AGE
-# This option determines the maximum age (in seconds) for which check
-# result files are considered valid.  Files older than this
-# threshold will be mercilessly deleted without further processing.
-
-max_check_result_file_age=3600
-
-
-
-
-# CACHED HOST CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous host check is considered current.
-# Cached host states (from host checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to the host check logic.
-# Too high of a value for this option may result in inaccurate host
-# states being used by Nagios, while a lower value may result in a
-# performance hit for host checks.  Use a value of 0 to disable host
-# check caching.
-
-cached_host_check_horizon=15
-
-
-
-# CACHED SERVICE CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous service check is considered current.
-# Cached service states (from service checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to predictive dependency checks.
-# Use a value of 0 to disable service check caching.
-
-cached_service_check_horizon=15
-
-
-
-# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of hosts when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# host dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_host_dependency_checks=1
-
-
-
-# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of services when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# service dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_service_dependency_checks=1
-
-
-
-# SOFT STATE DEPENDENCIES
-# This option determines whether or not Nagios will use soft state 
-# information when checking host and service dependencies. Normally 
-# Nagios will only use the latest hard host or service state when 
-# checking dependencies. If you want it to use the latest state (regardless
-# of whether it's a soft or hard state type), enable this option.
-# Values:
-#  0 = Don't use soft state dependencies (default) 
-#  1 = Use soft state dependencies 
-
-soft_state_dependencies=0
-
-
-
-# TIME CHANGE ADJUSTMENT THRESHOLDS
-# These options determine when Nagios will react to detected changes
-# in system time (either forward or backwards).
-
-#time_change_threshold=900
-
-
-
-# AUTO-RESCHEDULING OPTION
-# This option determines whether or not Nagios will attempt to
-# automatically reschedule active host and service checks to
-# "smooth" them out over time.  This can help balance the load on
-# the monitoring server.  
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_reschedule_checks=0
-
-
-
-# AUTO-RESCHEDULING INTERVAL
-# This option determines how often (in seconds) Nagios will
-# attempt to automatically reschedule checks.  This option only
-# has an effect if the auto_reschedule_checks option is enabled.
-# Default is 30 seconds.
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_interval=30
-
-
-
-# AUTO-RESCHEDULING WINDOW
-# This option determines the "window" of time (in seconds) that
-# Nagios will look at when automatically rescheduling checks.
-# Only host and service checks that occur in the next X seconds
-# (determined by this variable) will be rescheduled. This option
-# only has an effect if the auto_reschedule_checks option is
-# enabled.  Default is 180 seconds (3 minutes).
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_window=180
-
-
-
-# SLEEP TIME
-# This is the number of seconds to sleep between checking for system
-# events and service checks that need to be run.
-
-sleep_time=0.25
-
-
-
-# TIMEOUT VALUES
-# These options control how much time Nagios will allow various
-# types of commands to execute before killing them off.  Options
-# are available for controlling maximum time allotted for
-# service checks, host checks, event handlers, notifications, the
-# ocsp command, and performance data commands.  All values are in
-# seconds.
-
-service_check_timeout=60
-host_check_timeout=30
-event_handler_timeout=30
-notification_timeout=30
-ocsp_timeout=5
-perfdata_timeout=5
-
-
-
-# RETAIN STATE INFORMATION
-# This setting determines whether or not Nagios will save state
-# information for services and hosts before it shuts down.  Upon
-# startup Nagios will reload all saved service and host state
-# information before starting to monitor.  This is useful for 
-# maintaining long-term data on state statistics, etc, but will
-# slow Nagios down a bit when it (re)starts.  Since it's only
-# a one-time penalty, I think it's well worth the additional
-# startup delay.
-
-retain_state_information=1
-
-
-
-# STATE RETENTION FILE
-# This is the file that Nagios should use to store host and
-# service state information before it shuts down.  The state 
-# information in this file is also read immediately prior to
-# starting to monitor the network when Nagios is restarted.
-# This file is used only if the retain_state_information
-# variable is set to 1.
-
-state_retention_file=/var/nagios/retention.dat
-
-
-
-# RETENTION DATA UPDATE INTERVAL
-# This setting determines how often (in minutes) that Nagios
-# will automatically save retention data during normal operation.
-# If you set this value to 0, Nagios will not save retention
-# data at regular interval, but it will still save retention
-# data before shutting down or restarting.  If you have disabled
-# state retention, this option has no effect.
-
-retention_update_interval=60
-
-
-
-# USE RETAINED PROGRAM STATE
-# This setting determines whether or not Nagios will set 
-# program status variables based on the values saved in the
-# retention file.  If you want to use retained program status
-# information, set this value to 1.  If not, set this value
-# to 0.
-
-use_retained_program_state=1
-
-
-
-# USE RETAINED SCHEDULING INFO
-# This setting determines whether or not Nagios will retain
-# the scheduling info (next check time) for hosts and services
-# based on the values saved in the retention file.  If you
-# want to use retained scheduling info, set this
-# value to 1.  If not, set this value to 0.
-
-use_retained_scheduling_info=1
-
-
-
-# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
-# The following variables are used to specify specific host and
-# service attributes that should *not* be retained by Nagios during
-# program restarts.
-#
-# The values of the masks are bitwise ANDs of values specified
-# by the "MODATTR_" definitions found in include/common.h.  
-# For example, if you do not want the current enabled/disabled state
-# of flap detection and event handlers for hosts to be retained, you
-# would use a value of 24 for the host attribute mask...
-# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
-
-# This mask determines what host attributes are not retained
-retained_host_attribute_mask=0
-
-# This mask determines what service attributes are not retained
-retained_service_attribute_mask=0
-
-# These two masks determine what process attributes are not retained.
-# There are two masks, because some process attributes have host and service
-# options.  For example, you can disable active host checks, but leave active
-# service checks enabled.
-retained_process_host_attribute_mask=0
-retained_process_service_attribute_mask=0
-
-# These two masks determine what contact attributes are not retained.
-# There are two masks, because some contact attributes have host and
-# service options.  For example, you can disable host notifications for
-# a contact, but leave service notifications enabled for them.
-retained_contact_host_attribute_mask=0
-retained_contact_service_attribute_mask=0
-
-
-
-# INTERVAL LENGTH
-# This is the number of seconds per unit interval as used in the
-# host/contact/service configuration files.  Setting this to 60 means
-# that each interval is one minute long (60 seconds).  Other settings
-# have not been tested much, so your mileage is likely to vary...
-
-interval_length=60
-
-
-
-# CHECK FOR UPDATES
-# This option determines whether Nagios will automatically check to
-# see if new updates (releases) are available.  It is recommended that you
-# enable this option to ensure that you stay on top of the latest critical
-# patches to Nagios.  Nagios is critical to you - make sure you keep it in
-# good shape.  Nagios will check once a day for new updates. Data collected
-# by Nagios Enterprises from the update check is processed in accordance 
-# with our privacy policy - see http://api.nagios.org for details.
-
-check_for_updates=1
-
-
-
-# BARE UPDATE CHECK
-# This option determines what data Nagios will send to api.nagios.org when
-# it checks for updates.  By default, Nagios will send information on the 
-# current version of Nagios you have installed, as well as an indicator as
-# to whether this was a new installation or not.  Nagios Enterprises uses
-# this data to determine the number of users running specific versions of
-# Nagios.  Enable this option if you do not want this information to be sent.
-
-bare_update_check=0
-
-
-
-# AGGRESSIVE HOST CHECKING OPTION
-# If you don't want to turn on aggressive host checking features, set
-# this value to 0 (the default).  Otherwise set this value to 1 to
-# enable the aggressive check option.  Read the docs for more info
-# on what aggressive host checking is, or check out the source code in
-# base/checks.c
-
-use_aggressive_host_checking=0
-
-
-
-# SERVICE CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# service checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of service checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_service_checks=1
-
-
-
-# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# service check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_service_checks=1
-
-
-
-# HOST CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# host checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of host checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_host_checks=1
-
-
-
-# PASSIVE HOST CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# host check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_host_checks=1
-
-
-
-# NOTIFICATIONS OPTION
-# This determines whether or not Nagios will send out any host or
-# service notifications when it is initially (re)started.
-# Values: 1 = enable notifications, 0 = disable notifications
-
-enable_notifications=1
-
-
-
-# EVENT HANDLER USE OPTION
-# This determines whether or not Nagios will run any host or
-# service event handlers when it is initially (re)started.  Unless
-# you're implementing redundant hosts, leave this option enabled.
-# Values: 1 = enable event handlers, 0 = disable event handlers
-
-enable_event_handlers=1
-
-
-
-# PROCESS PERFORMANCE DATA OPTION
-# This determines whether or not Nagios will process performance
-# data returned from service and host checks.  If this option is
-# enabled, host performance data will be processed using the
-# host_perfdata_command (defined below) and service performance
-# data will be processed using the service_perfdata_command (also
-# defined below).  Read the HTML docs for more information on
-# performance data.
-# Values: 1 = process performance data, 0 = do not process performance data
-
-process_performance_data=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
-# These commands are run after every host and service check is
-# performed.  These commands are executed only if the
-# process_performance_data option (above) is set to 1.  The command
-# argument is the short name of a command definition that you 
-# define in your host configuration file.  Read the HTML docs for
-# more information on performance data.
-
-#host_perfdata_command=process-host-perfdata
-#service_perfdata_command=process-service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILES
-# These files are used to store host and service performance data.
-# Performance data is only written to these files if the
-# process_performance_data option (above) is set to 1.
-
-#host_perfdata_file=/tmp/host-perfdata
-#service_perfdata_file=/tmp/service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
-# These options determine what data is written (and how) to the
-# performance data files.  The templates may contain macros, special
-# characters (\t for tab, \r for carriage return, \n for newline)
-# and plain text.  A newline is automatically added after each write
-# to the performance data file.  Some examples of what you can do are
-# shown below.
-
-#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
-#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE MODES
-# This option determines whether or not the host and service
-# performance data files are opened in write ("w") or append ("a")
-# mode. If you want to use named pipes, you should use the special
-# pipe ("p") mode which avoid blocking at startup, otherwise you will
-# likely want the defult append ("a") mode.
-
-#host_perfdata_file_mode=a
-#service_perfdata_file_mode=a
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
-# These options determine how often (in seconds) the host and service
-# performance data files are processed using the commands defined
-# below.  A value of 0 indicates the files should not be periodically
-# processed.
-
-#host_perfdata_file_processing_interval=0
-#service_perfdata_file_processing_interval=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
-# These commands are used to periodically process the host and
-# service performance data files.  The interval at which the
-# processing occurs is determined by the options above.
-
-#host_perfdata_file_processing_command=process-host-perfdata-file
-#service_perfdata_file_processing_command=process-service-perfdata-file
-
-
-
-# OBSESS OVER SERVICE CHECKS OPTION
-# This determines whether or not Nagios will obsess over service
-# checks and run the ocsp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over services, 0 = do not obsess (default)
-
-obsess_over_services=0
-
-
-
-# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
-# This is the command that is run for every service check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_services option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ocsp_command=somecommand
-
-
-
-# OBSESS OVER HOST CHECKS OPTION
-# This determines whether or not Nagios will obsess over host
-# checks and run the ochp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over hosts, 0 = do not obsess (default)
-
-obsess_over_hosts=0
-
-
-
-# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
-# This is the command that is run for every host check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_hosts option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ochp_command=somecommand
-
-
-
-# TRANSLATE PASSIVE HOST CHECKS OPTION
-# This determines whether or not Nagios will translate
-# DOWN/UNREACHABLE passive host check results into their proper
-# state for this instance of Nagios.  This option is useful
-# if you have distributed or failover monitoring setup.  In
-# these cases your other Nagios servers probably have a different
-# "view" of the network, with regards to the parent/child relationship
-# of hosts.  If a distributed monitoring server thinks a host
-# is DOWN, it may actually be UNREACHABLE from the point of
-# this Nagios instance.  Enabling this option will tell Nagios
-# to translate any DOWN or UNREACHABLE host states it receives
-# passively into the correct state from the view of this server.
-# Values: 1 = perform translation, 0 = do not translate (default)
-
-translate_passive_host_checks=0
-
-
-
-# PASSIVE HOST CHECKS ARE SOFT OPTION
-# This determines whether or not Nagios will treat passive host
-# checks as being HARD or SOFT.  By default, a passive host check
-# result will put a host into a HARD state type.  This can be changed
-# by enabling this option.
-# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
-
-passive_host_checks_are_soft=0
-
-
-
-# ORPHANED HOST/SERVICE CHECK OPTIONS
-# These options determine whether or not Nagios will periodically 
-# check for orphaned host and service checks.  Since service checks are
-# not rescheduled until the results of their previous execution 
-# instance are processed, there exists a possibility that some
-# checks may never get rescheduled.  A similar situation exists for
-# host checks, although the exact scheduling details differ a bit
-# from service checks.  Orphaned checks seem to be a rare
-# problem and should not happen under normal circumstances.
-# If you have problems with service checks never getting
-# rescheduled, make sure you have orphaned service checks enabled.
-# Values: 1 = enable checks, 0 = disable checks
-
-check_for_orphaned_services=1
-check_for_orphaned_hosts=1
-
-
-
-# SERVICE FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of service results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_service_freshness=0
-
-
-
-# SERVICE FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of service check results.  If you have
-# disabled service freshness checking, this option has no effect.
-
-service_freshness_check_interval=60
-
-
-
-# HOST FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of host results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_host_freshness=0
-
-
-
-# HOST FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of host check results.  If you have
-# disabled host freshness checking, this option has no effect.
-
-host_freshness_check_interval=60
-
-
-
-
-# ADDITIONAL FRESHNESS THRESHOLD LATENCY
-# This setting determines the number of seconds that Nagios
-# will add to any host and service freshness thresholds that
-# it calculates (those not explicitly specified by the user).
-
-additional_freshness_latency=15
-
-
-
-
-# FLAP DETECTION OPTION
-# This option determines whether or not Nagios will try
-# and detect hosts and services that are "flapping".  
-# Flapping occurs when a host or service changes between
-# states too frequently.  When Nagios detects that a 
-# host or service is flapping, it will temporarily suppress
-# notifications for that host/service until it stops
-# flapping.  Flap detection is very experimental, so read
-# the HTML documentation before enabling this feature!
-# Values: 1 = enable flap detection
-#         0 = disable flap detection (default)
-
-enable_flap_detection=1
-
-
-
-# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
-# Read the HTML documentation on flap detection for
-# an explanation of what this option does.  This option
-# has no effect if flap detection is disabled.
-
-low_service_flap_threshold=5.0
-high_service_flap_threshold=20.0
-low_host_flap_threshold=5.0
-high_host_flap_threshold=20.0
-
-
-
-# DATE FORMAT OPTION
-# This option determines how short dates are displayed. Valid options
-# include:
-#	us		(MM-DD-YYYY HH:MM:SS)
-#	euro    	(DD-MM-YYYY HH:MM:SS)
-#	iso8601		(YYYY-MM-DD HH:MM:SS)
-#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
-#
-
-date_format=us
-
-
-
-
-# TIMEZONE OFFSET
-# This option is used to override the default timezone that this
-# instance of Nagios runs in.  If not specified, Nagios will use
-# the system configured timezone.
-#
-# NOTE: In order to display the correct timezone in the CGIs, you
-# will also need to alter the Apache directives for the CGI path 
-# to include your timezone.  Example:
-#
-#   <Directory "/usr/local/nagios/sbin/">
-#      SetEnv TZ "Australia/Brisbane"
-#      ...
-#   </Directory>
-
-#use_timezone=US/Mountain
-#use_timezone=Australia/Brisbane
-
-
-
-
-# P1.PL FILE LOCATION
-# This value determines where the p1.pl perl script (used by the
-# embedded Perl interpreter) is located.  If you didn't compile
-# Nagios with embedded Perl support, this option has no effect.
-
-p1_file = {{nagios_p1_pl}}
-
-
-
-# EMBEDDED PERL INTERPRETER OPTION
-# This option determines whether or not the embedded Perl interpreter
-# will be enabled during runtime.  This option has no effect if Nagios
-# has not been compiled with support for embedded Perl.
-# Values: 0 = disable interpreter, 1 = enable interpreter
-
-enable_embedded_perl=1
-
-
-
-# EMBEDDED PERL USAGE OPTION
-# This option determines whether or not Nagios will process Perl plugins
-# and scripts with the embedded Perl interpreter if the plugins/scripts
-# do not explicitly indicate whether or not it is okay to do so. Read
-# the HTML documentation on the embedded Perl interpreter for more 
-# information on how this option works.
-
-use_embedded_perl_implicitly=1
-
-
-
-# ILLEGAL OBJECT NAME CHARACTERS
-# This option allows you to specify illegal characters that cannot
-# be used in host names, service descriptions, or names of other
-# object types.
-
-illegal_object_name_chars=`~!$%^&*|'"<>?,()=
-
-
-
-# ILLEGAL MACRO OUTPUT CHARACTERS
-# This option allows you to specify illegal characters that are
-# stripped from macros before being used in notifications, event
-# handlers, etc.  This DOES NOT affect macros used in service or
-# host check commands.
-# The following macros are stripped of the characters you specify:
-#	$HOSTOUTPUT$
-#	$HOSTPERFDATA$
-#	$HOSTACKAUTHOR$
-#	$HOSTACKCOMMENT$
-#	$SERVICEOUTPUT$
-#	$SERVICEPERFDATA$
-#	$SERVICEACKAUTHOR$
-#	$SERVICEACKCOMMENT$
-
-illegal_macro_output_chars=`~$&|'"<>
-
-
-
-# REGULAR EXPRESSION MATCHING
-# This option controls whether or not regular expression matching
-# takes place in the object config files.  Regular expression
-# matching is used to match host, hostgroup, service, and service
-# group names/descriptions in some fields of various object types.
-# Values: 1 = enable regexp matching, 0 = disable regexp matching
-
-use_regexp_matching=0
-
-
-
-# "TRUE" REGULAR EXPRESSION MATCHING
-# This option controls whether or not "true" regular expression 
-# matching takes place in the object config files.  This option
-# only has an effect if regular expression matching is enabled
-# (see above).  If this option is DISABLED, regular expression
-# matching only occurs if a string contains wildcard characters
-# (* and ?).  If the option is ENABLED, regexp matching occurs
-# all the time (which can be annoying).
-# Values: 1 = enable true matching, 0 = disable true matching
-
-use_true_regexp_matching=0
-
-
-
-# ADMINISTRATOR EMAIL/PAGER ADDRESSES
-# The email and pager address of a global administrator (likely you).
-# Nagios never uses these values itself, but you can access them by
-# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
-# commands.
-
-admin_email=nagios@localhost
-admin_pager=pagenagios@localhost
-
-
-
-# DAEMON CORE DUMP OPTION
-# This option determines whether or not Nagios is allowed to create
-# a core dump when it runs as a daemon.  Note that it is generally
-# considered bad form to allow this, but it may be useful for
-# debugging purposes.  Enabling this option doesn't guarantee that
-# a core file will be produced, but that's just life...
-# Values: 1 - Allow core dumps
-#         0 - Do not allow core dumps (default)
-
-daemon_dumps_core=0
-
-
-
-# LARGE INSTALLATION TWEAKS OPTION
-# This option determines whether or not Nagios will take some shortcuts
-# which can save on memory and CPU usage in large Nagios installations.
-# Read the documentation for more information on the benefits/tradeoffs
-# of enabling this option.
-# Values: 1 - Enable tweaks
-#         0 - Disable tweaks (default)
-
-use_large_installation_tweaks=1
-
-
-
-# ENABLE ENVIRONMENT MACROS
-# This option determines whether or not Nagios will make all standard
-# macros available as environment variables when host/service checks
-# and system commands (event handlers, notifications, etc.) are
-# executed.  Enabling this option can cause performance issues in 
-# large installations, as it will consume a bit more memory and (more
-# importantly) consume more CPU.
-# Values: 1 - Enable environment variable macros (default)
-#         0 - Disable environment variable macros
-
-# NAGIOS_* macros are required for Ambari Maintenance Mode (mm_wrapper.py)
-enable_environment_macros=1
-
-
-
-# CHILD PROCESS MEMORY OPTION
-# This option determines whether or not Nagios will free memory in
-# child processes (processes used to execute system commands and host/
-# service checks).  If you specify a value here, it will override
-# program defaults.
-# Value: 1 - Free memory in child processes
-#        0 - Do not free memory in child processes
-
-#free_child_process_memory=1
-
-
-
-# CHILD PROCESS FORKING BEHAVIOR
-# This option determines how Nagios will fork child processes
-# (used to execute system commands and host/service checks).  Normally
-# child processes are fork()ed twice, which provides a very high level
-# of isolation from problems.  Fork()ing once is probably enough and will
-# save a great deal on CPU usage (in large installs), so you might
-# want to consider using this.  If you specify a value here, it will
-# override program defaults.
-# Value: 1 - Child processes fork() twice
-#        0 - Child processes fork() just once
-
-#child_processes_fork_twice=1
-
-
-
-# DEBUG LEVEL
-# This option determines how much (if any) debugging information will
-# be written to the debug file.  OR values together to log multiple
-# types of information.
-# Values: 
-#          -1 = Everything
-#           0 = Nothing
-#           1 = Functions
-#           2 = Configuration
-#           4 = Process information
-#           8 = Scheduled events
-#          16 = Host/service checks
-#          32 = Notifications
-#          64 = Event broker
-#         128 = External commands
-#         256 = Commands
-#         512 = Scheduled downtime
-#        1024 = Comments
-#        2048 = Macros
-
-debug_level=0
-
-
-
-# DEBUG VERBOSITY
-# This option determines how verbose the debug log output will be.
-# Values: 0 = Brief output
-#         1 = More detailed
-#         2 = Very detailed
-
-debug_verbosity=1
-
-
-
-# DEBUG FILE
-# This option determines where Nagios should write debugging information.
-
-debug_file=/var/log/nagios/nagios.debug
-
-
-
-# MAX DEBUG FILE SIZE
-# This option determines the maximum size (in bytes) of the debug file.  If
-# the file grows larger than this size, it will be renamed with a .old
-# extension.  If a file already exists with a .old extension it will
-# automatically be deleted.  This helps ensure your disk space usage doesn't
-# get out of control when debugging Nagios.
-
-max_debug_file_size=1000000
-
-
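
The cfg_file block near the top of the deleted nagios.cfg.j2 above is plain Jinja2. As a minimal sketch of how that loop expands (assuming the jinja2 package; the object-file paths are illustrative stand-ins for the values Ambari supplied):

    from jinja2 import Template

    # Illustrative paths; Ambari filled these in from the stack definition.
    cfg_files = [
        "/etc/nagios/objects/hadoop-commands.cfg",
        "/etc/nagios/objects/hadoop-services.cfg",
    ]

    template = Template(
        "{% for cfg_file in cfg_files %}"
        "cfg_file={{cfg_file}}\n"
        "{% endfor %}"
    )
    print(template.render(cfg_files=cfg_files))
    # cfg_file=/etc/nagios/objects/hadoop-commands.cfg
    # cfg_file=/etc/nagios/objects/hadoop-services.cfg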

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.conf.j2
deleted file mode 100644
index f415e65..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.conf.j2
+++ /dev/null
@@ -1,84 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-#
-# SAMPLE CONFIG SNIPPETS FOR APACHE WEB SERVER
-# Last Modified: 11-26-2005
-#
-# This file contains examples of entries that need
-# to be incorporated into your Apache web server
-# configuration file.  Customize the paths, etc. as
-# needed to fit your system.
-#
-
-ScriptAlias {{cgi_weblink}} "{{cgi_dir}}"
-
-<Directory "{{cgi_dir}}">
-#  SSLRequireSSL
-   Options ExecCGI
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile {{conf_dir}}/htpasswd.users
-   Require valid-user
-</Directory>
-
-Alias /nagios "{{nagios_web_dir}}"
-{# Ubuntu has different nagios url #}
-{% if os_family == "ubuntu" %}
-Alias /nagios3 "{{nagios_web_dir}}"
-{% endif %}
-
-<Directory "{{nagios_web_dir}}">
-#  SSLRequireSSL
-   Options FollowSymLinks
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile {{conf_dir}}/htpasswd.users
-   Require valid-user
-</Directory>
-
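
The deleted nagios.conf.j2 above is likewise a Jinja2 template; the os_family conditional is what emitted the extra /nagios3 alias on Ubuntu, where the packaged web UI lives under a different URL. A minimal sketch of that branch (jinja2 assumed, values illustrative):

    from jinja2 import Template

    alias_tpl = Template(
        'Alias /nagios "{{nagios_web_dir}}"\n'
        '{% if os_family == "ubuntu" %}'
        'Alias /nagios3 "{{nagios_web_dir}}"\n'
        '{% endif %}'
    )
    # On Ubuntu both aliases are emitted; on other platforms only /nagios.
    print(alias_tpl.render(nagios_web_dir="/usr/share/nagios", os_family="ubuntu"))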

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.j2
deleted file mode 100644
index 0927915..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.j2
+++ /dev/null
@@ -1,164 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#!/bin/sh
-# $Id$
-# Nagios	Startup script for the Nagios monitoring daemon
-#
-# chkconfig:	- 85 15
-# description:	Nagios is a service monitoring system
-# processname: nagios
-# config: /etc/nagios/nagios.cfg
-# pidfile: /var/nagios/nagios.pid
-#
-### BEGIN INIT INFO
-# Provides:		nagios
-# Required-Start:	$local_fs $syslog $network
-# Required-Stop:	$local_fs $syslog $network
-# Short-Description:    start and stop Nagios monitoring server
-# Description:		Nagios is a service monitoring system
-### END INIT INFO
-
-
-# Source function library.
-. /etc/rc.d/init.d/functions
-
-prefix="/usr"
-exec_prefix="/usr"
-exec="/usr/sbin/nagios"
-prog="nagios"
-config="{{conf_dir}}/nagios.cfg"
-pidfile="{{nagios_pid_file}}"
-user="{{nagios_user}}"
-
-[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
-
-lockfile=/var/lock/subsys/$prog
-
-start() {
-    [ -x $exec ] || exit 5
-    [ -f $config ] || exit 6
-    echo -n $"Starting $prog: "
-    daemon --user=$user $exec -d $config
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && touch $lockfile
-    return $retval
-}
-
-stop() {
-    echo -n $"Stopping $prog: "
-    killproc -d 10 $exec
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && rm -f $lockfile
-    return $retval
-}
-
-
-restart() {
-    stop
-    start
-}
-
-reload() {
-    echo -n $"Reloading $prog: "
-    killproc $exec -HUP
-    RETVAL=$?
-    echo
-}
-
-force_reload() {
-    restart
-}
-
-check_config() {
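-        # $nice and $corelimit may be defined in /etc/sysconfig/nagios
-        # (sourced above); if unset they expand to nothing, which is harmless.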
-        $nice runuser -s /bin/bash - $user -c "$corelimit >/dev/null 2>&1 ; $exec -v $config > /dev/null 2>&1"
-        RETVAL=$?
-        if [ $RETVAL -ne 0 ] ; then
-                echo -n $"Configuration validation failed"
-                failure
-                echo
-                exit 1
-
-        fi
-}
-
-
-case "$1" in
-    start)
-        status $prog && exit 0
-	check_config
-        $1
-        ;;
-    stop)
-        status $prog|| exit 0
-        $1
-        ;;
-    restart)
-	check_config
-        $1
-        ;;
-    reload)
-        status $prog || exit 7
-	check_config
-        $1
-        ;;
-    force-reload)
-	check_config
-        force_reload
-        ;;
-    status)
-        status $prog
-        ;;
-    condrestart|try-restart)
-        status $prog|| exit 0
-	check_config
-        restart
-        ;;
-    configtest)
-        echo -n  $"Checking config for $prog: "
-        check_config && success
-        echo
-	;;
-    *)
-        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
-        exit 2
-esac
-exit $?
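
For orientation, an init script like the one above is what Ambari's Python service scripts ultimately invoke. A minimal sketch of driving it from Python (the /etc/init.d/nagios path is an assumption for illustration):

    import subprocess

    def nagios_service(action):
        """Run the init script with the given action; return its exit code."""
        # "configtest" maps to check_config in the script above;
        # "status" exits 0 only while the daemon is running.
        return subprocess.call(["/etc/init.d/nagios", action])

    if nagios_service("configtest") == 0:
        nagios_service("restart")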

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/resource.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/resource.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/resource.cfg.j2
deleted file mode 100644
index 291d90f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/resource.cfg.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-{#
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-#}
-
-###########################################################################
-#
-# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
-#
-# Last Modified: 09-10-2003
-#
-# You can define $USERx$ macros in this file, which can in turn be used
-# in command definitions in your host config file(s).  $USERx$ macros are
-# useful for storing sensitive information such as usernames, passwords,
-# etc.  They are also handy for specifying the path to plugins and
-# event handlers - if you decide to move the plugins or event handlers to
-# a different directory in the future, you can just update one or two
-# $USERx$ macros, instead of modifying a lot of command definitions.
-#
-# The CGIs will not attempt to read the contents of resource files, so
-# you can set restrictive permissions (600 or 660) on them.
-#
-# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
-#
-# Resource files may also be used to store configuration directives for
-# external data sources like MySQL...
-#
-###########################################################################
-
-# Sets $USER1$ to be the path to the plugins
-$USER1$={{plugins_dir}}
-
-# Sets $USER2$ to be the path to event handlers
-#$USER2$={{eventhandlers_dir}}
-
-# Store some usernames and passwords (hidden from the CGIs)
-#$USER3$=someuser
-#$USER4$=somepassword
\ No newline at end of file
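
The $USERx$ macros defined in the deleted resource file are substituted into command definitions by Nagios itself. A minimal Python sketch of that substitution, with an illustrative plugins path:

    # Hypothetical macro table, as resource.cfg would populate it.
    macros = {"USER1": "/usr/lib64/nagios/plugins"}

    def expand(command_line):
        """Replace $NAME$ tokens with their macro values."""
        for name, value in macros.items():
            command_line = command_line.replace("$%s$" % name, value)
        return command_line

    print(expand("$USER1$/check_ping -H localhost -w 100.0,20% -c 500.0,60%"))
    # /usr/lib64/nagios/plugins/check_ping -H localhost -w 100.0,20% -c 500.0,60%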

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 9115fb3..c73b6d3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -283,7 +283,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR']
 
   def getNotPreferableOnServerComponents(self):
-    return ['GANGLIA_SERVER', 'NAGIOS_SERVER']
+    return ['GANGLIA_SERVER']
 
   def getCardinalitiesDict(self):
     return {
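
The stack_advisor.py change above shrinks the list of components that should not be co-located with the Ambari server. A minimal sketch of how such a list is typically consulted (the helper method is hypothetical; the real checks live in Ambari's stack advisor framework):

    class HDP206StackAdvisorSketch(object):
        def getNotPreferableOnServerComponents(self):
            # After this commit only GANGLIA_SERVER remains discouraged.
            return ['GANGLIA_SERVER']

        def is_preferable_on_server(self, component):
            # Hypothetical helper for illustration.
            return component not in self.getNotPreferableOnServerComponents()

    advisor = HDP206StackAdvisorSketch()
    print(advisor.is_preferable_on_server('NAGIOS_SERVER'))   # True: no longer listed
    print(advisor.is_preferable_on_server('GANGLIA_SERVER'))  # False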

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.0/role_command_order.json
index 9c5db5c..d007c4b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
@@ -13,12 +11,6 @@
     "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "RESOURCEMANAGER-START", "NODEMANAGER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
     "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
@@ -46,8 +38,6 @@
     "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -63,12 +53,10 @@
   "_comment" : "Dependencies that are used in HA NameNode cluster",
   "namenode_optional_ha": {
     "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"]
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",
   "resourcemanager_optional_ha" : {
     "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
   }
 }
-

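The role_command_order.json format is documented by its own "_comment" key: each "ROLE-COMMAND" entry lists the "ROLE-COMMAND" blockers that must finish first. A self-contained sketch of looking up blockers in that map follows; the reader helper is ours, not the server's actual scheduler. With the Nagios entries gone, NAGIOS_SERVER-START simply resolves to no blockers.

import json

ORDER = json.loads("""
{
  "general_deps" : {
    "_comment" : "dependencies for all cases",
    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"]
  }
}
""")

def blockers(order, command, section='general_deps'):
    """Return the ROLE-COMMAND entries that must finish before `command`."""
    deps = order.get(section, {})
    return [b for b in deps.get(command, []) if b != '_comment']

assert blockers(ORDER, 'HBASE_REGIONSERVER-START') == ['HBASE_MASTER-START']
assert blockers(ORDER, 'NAGIOS_SERVER-START') == []  # entry no longer exists
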
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/multinode-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/multinode-default.json b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/multinode-default.json
index 7be13b7..7880107 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/multinode-default.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/multinode-default.json
@@ -1,10 +1,5 @@
 {
     "configurations" : [
-        {
-            "nagios-env" : {
-                "nagios_contact" : "admin@localhost"
-            }
-        }
     ],
     "host_groups" : [
         {
@@ -137,9 +132,6 @@
                     "name" : "AMBARI_SERVER"
                 },
                 {
-                    "name" : "NAGIOS_SERVER"
-                },
-                {
                     "name" : "ZOOKEEPER_CLIENT"
                 },
                 {

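Note that the edit leaves "configurations" as an empty list, which is still valid blueprint JSON. A small sketch of walking a blueprint's host_groups to confirm a component is gone; the host-group name is hypothetical and the helper is ours, not Ambari's.

BLUEPRINT = {
    "configurations": [],
    "host_groups": [
        {"name": "master_1",  # hypothetical group name
         "components": [{"name": "AMBARI_SERVER"},
                        {"name": "ZOOKEEPER_CLIENT"}]}
    ]
}

def components_in(blueprint):
    """Flatten every component name declared across host groups."""
    return [comp["name"]
            for group in blueprint.get("host_groups", [])
            for comp in group.get("components", [])]

assert "NAGIOS_SERVER" not in components_in(BLUEPRINT)
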
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/singlenode-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/singlenode-default.json b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/singlenode-default.json
index 501b5d0..84cf152 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/singlenode-default.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/singlenode-default.json
@@ -1,10 +1,5 @@
 {
     "configurations" : [
-        {
-            "nagios-env" : {
-                "nagios_contact" : "admin@localhost"
-            }
-        }
     ],
     "host_groups" : [
         {
@@ -89,9 +84,6 @@
                     "name" : "FALCON_CLIENT"
                 },
                 {
-                    "name" : "NAGIOS_SERVER"
-                },
-                {
                     "name" : "SECONDARY_NAMENODE"
                 },
                 {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/role_command_order.json
index 82cbd79..7beaf7b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "NIMBUS-START" : ["ZOOKEEPER_SERVER-START"],
     "SUPERVISOR-START" : ["NIMBUS-START"],
     "STORM_UI_SERVER-START" : ["NIMBUS-START"],
@@ -18,12 +16,6 @@
     "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "RESOURCEMANAGER-START", "NODEMANAGER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
@@ -57,8 +49,6 @@
     "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -74,8 +64,7 @@
   "_comment" : "Dependencies that are used in HA NameNode cluster",
   "namenode_optional_ha": {
     "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"]
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",
   "resourcemanager_optional_ha" : {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/multinode-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/multinode-default.json b/ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/multinode-default.json
index 08ce8d5..d0b97c0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/multinode-default.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/multinode-default.json
@@ -1,10 +1,5 @@
 {
     "configurations" : [
-        {
-            "nagios-env" : {
-                "nagios_contact" : "admin@localhost"
-            }
-        }
     ],
     "host_groups" : [
         {
@@ -132,9 +127,6 @@
                     "name" : "AMBARI_SERVER"
                 },
                 {
-                    "name" : "NAGIOS_SERVER"
-                },
-                {
                     "name" : "ZOOKEEPER_CLIENT"
                 },
                 {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/singlenode-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/singlenode-default.json b/ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/singlenode-default.json
index 1727ebb..9200053 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/singlenode-default.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/singlenode-default.json
@@ -1,10 +1,5 @@
 {
     "configurations" : [
-        {
-            "nagios-env" : {
-                "nagios_contact" : "admin@localhost"
-            }
-        }
     ],
     "host_groups" : [
         {
@@ -86,9 +81,6 @@
                     "name" : "FALCON_CLIENT"
                 },
                 {
-                    "name" : "NAGIOS_SERVER"
-                },
-                {
                     "name" : "SECONDARY_NAMENODE"
                 },
                 {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.1/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.1/role_command_order.json
index a958e2e..2d152ba 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "NIMBUS-START" : ["ZOOKEEPER_SERVER-START"],
     "SUPERVISOR-START" : ["NIMBUS-START"],
     "STORM_UI_SERVER-START" : ["NIMBUS-START"],
@@ -18,12 +16,6 @@
     "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "NODEMANAGER-START", "RESOURCEMANAGER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
@@ -56,8 +48,6 @@
     "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -73,12 +63,10 @@
   "_comment" : "Dependencies that are used in HA NameNode cluster",
   "namenode_optional_ha": {
     "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"]
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",
   "resourcemanager_optional_ha" : {
     "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
   }
 }
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.1/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1/services/NAGIOS/metainfo.xml
deleted file mode 100644
index b6c0f8b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <version>3.5.0</version>
-      <components>
-        <component>
-           <name>NAGIOS_SERVER</name>
-            <dependencies>
-              <dependency>
-                <name>TEZ/TEZ_CLIENT</name>
-                <scope>host</scope>
-                <auto-deploy>
-                  <enabled>true</enabled>
-                </auto-deploy>
-              </dependency>
-            </dependencies>
-        </component>
-      </components>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
index 956a543..221656d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
@@ -54,7 +54,7 @@ class HDP21StackAdvisor(HDP206StackAdvisor):
                    + "m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC")
 
   def getNotPreferableOnServerComponents(self):
-    return ['STORM_UI_SERVER', 'DRPC_SERVER', 'STORM_REST_API', 'NIMBUS', 'GANGLIA_SERVER', 'NAGIOS_SERVER']
+    return ['STORM_UI_SERVER', 'DRPC_SERVER', 'STORM_REST_API', 'NIMBUS', 'GANGLIA_SERVER']
 
   def getNotValuableComponents(self):
     return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR', 'APP_TIMELINE_SERVER']

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
index a7d9c0f..72b49fa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "NIMBUS-START" : ["ZOOKEEPER_SERVER-START", "NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "SUPERVISOR-START" : ["NIMBUS-START"],
     "STORM_UI_SERVER-START" : ["NIMBUS-START"],
@@ -19,12 +17,6 @@
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
     "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START", "OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "NODEMANAGER-START", "RESOURCEMANAGER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
@@ -63,8 +55,6 @@
     "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -80,12 +70,10 @@
   "_comment" : "Dependencies that are used in HA NameNode cluster",
   "namenode_optional_ha": {
     "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"]
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",
   "resourcemanager_optional_ha" : {
     "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
   }
 }
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
index e80e3e5..2a0f3db 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
@@ -25,6 +25,7 @@ import static org.mockito.Mockito.atLeast;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -66,7 +67,6 @@ import org.slf4j.LoggerFactory;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
-import java.util.ArrayList;
 
 public class TestHeartbeatMonitor {
 
@@ -95,12 +95,12 @@ public class TestHeartbeatMonitor {
   public void teardown() {
     injector.getInstance(PersistService.class).stop();
   }
-  
+
   private void setOsFamily(Host host, String osFamily, String osVersion) {
     Map<String, String> hostAttributes = new HashMap<String, String>();
     hostAttributes.put("os_family", osFamily);
     hostAttributes.put("os_release_version", osVersion);
-    
+
     host.setHostAttributes(hostAttributes);
   }
 
@@ -121,15 +121,15 @@ public class TestHeartbeatMonitor {
       add(hostname1);
       add(hostname2);
     }};
-    
+
     ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
     Config config = configFactory.createNew(cluster, "hadoop-env",
         new HashMap<String,String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
     config.setTag("version1");
     cluster.addConfig(config);
     cluster.addDesiredConfig("_test", Collections.singleton(config));
-    
-    
+
+
     clusters.mapHostsToCluster(hostNames, clusterName);
     Service hdfs = cluster.addService(serviceName);
     hdfs.persist();
@@ -165,7 +165,7 @@ public class TestHeartbeatMonitor {
     hb.setTimestamp(System.currentTimeMillis());
     hb.setResponseId(12);
     handler.handleHeartBeat(hb);
-    
+
     List<Alert> al = new ArrayList<Alert>();
     Alert alert = new Alert("host_alert", null, "AMBARI", null, hostname1, AlertState.OK);
     al.add(alert);
@@ -189,7 +189,7 @@ public class TestHeartbeatMonitor {
     assertEquals(true, containsDATANODEStatus);
     assertEquals(true, containsNAMENODEStatus);
     assertEquals(true, containsSECONDARY_NAMENODEStatus);
-    
+
     cmds = hm.generateStatusCommands(hostname2);
     assertTrue("HeartbeatMonitor should not generate StatusCommands for host2 because it has no services", cmds.isEmpty());
   }
@@ -405,7 +405,7 @@ public class TestHeartbeatMonitor {
     }
     assertEquals(fsm.getHost(hostname).getState(), HostState.HEARTBEAT_LOST);
   }
-  
+
   @Test
   public void testHeartbeatLossWithComponent() throws AmbariException, InterruptedException,
           InvalidStateTransitionException {
@@ -413,7 +413,7 @@ public class TestHeartbeatMonitor {
     clusters.addHost(hostname1);
     setOsFamily(clusters.getHost(hostname1), "redhat", "6.3");
     clusters.getHost(hostname1).persist();
-    
+
     clusters.addCluster(clusterName);
     Cluster cluster = clusters.getCluster(clusterName);
     cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
@@ -434,12 +434,12 @@ public class TestHeartbeatMonitor {
     hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
     hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
     hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(hostname1);
-    
+
     ActionQueue aq = new ActionQueue();
     ActionManager am = mock(ActionManager.class);
     HeartbeatMonitor hm = new HeartbeatMonitor(clusters, aq, am, 10, injector);
     HeartBeatHandler handler = new HeartBeatHandler(clusters, aq, am, injector);
-    
+
     Register reg = new Register();
     reg.setHostname(hostname1);
     reg.setResponseId(12);
@@ -449,18 +449,18 @@ public class TestHeartbeatMonitor {
     hi.setOS("Centos5");
     reg.setHardwareProfile(hi);
     handler.handleRegistration(reg);
-    
+
     cluster = clusters.getClustersForHost(hostname1).iterator().next();
     for (ServiceComponentHost sch : cluster.getServiceComponentHosts(hostname1)) {
       if (sch.getServiceComponentName().equals("NAMENODE")) {
         // installing
         sch.handleEvent(new ServiceComponentHostInstallEvent(
             sch.getServiceComponentName(), sch.getHostName(), System.currentTimeMillis(), "HDP-0.1"));
-        
+
         // installed
         sch.handleEvent(new ServiceComponentHostOpSucceededEvent(sch.getServiceComponentName(),
             sch.getHostName(), System.currentTimeMillis()));
-        
+
         // started
         sch.handleEvent(new ServiceComponentHostStartedEvent(sch.getServiceComponentName(),
             sch.getHostName(), System.currentTimeMillis()));
@@ -483,14 +483,14 @@ public class TestHeartbeatMonitor {
           sch.getHostName(), System.currentTimeMillis()));
       }
     }
-    
+
     HeartBeat hb = new HeartBeat();
     hb.setHostname(hostname1);
     hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, "cool"));
     hb.setTimestamp(System.currentTimeMillis());
     hb.setResponseId(12);
     handler.handleHeartBeat(hb);
-    
+
     hm.start();
     aq.enqueue(hostname1, new ExecutionCommand());
     //Heartbeat will expire and action queue will be flushed
@@ -498,72 +498,67 @@ public class TestHeartbeatMonitor {
       Thread.sleep(1);
     }
     hm.shutdown();
-    
+
 
     cluster = clusters.getClustersForHost(hostname1).iterator().next();
     for (ServiceComponentHost sch : cluster.getServiceComponentHosts(hostname1)) {
       Service s = cluster.getService(sch.getServiceName());
       ServiceComponent sc = s.getServiceComponent(sch.getServiceComponentName());
-      if (sch.getServiceComponentName().equals("NAMENODE"))
+      if (sch.getServiceComponentName().equals("NAMENODE")) {
         assertEquals(sch.getServiceComponentName(), State.UNKNOWN, sch.getState());
-      else if (sch.getServiceComponentName().equals("DATANODE"))
+      } else if (sch.getServiceComponentName().equals("DATANODE")) {
         assertEquals(sch.getServiceComponentName(), State.INSTALLING, sch.getState());
-      else if (sc.isClientComponent())
+      } else if (sc.isClientComponent()) {
         assertEquals(sch.getServiceComponentName(), State.INIT, sch.getState());
-      else if (sch.getServiceComponentName().equals("SECONDARY_NAMENODE"))
+      } else if (sch.getServiceComponentName().equals("SECONDARY_NAMENODE")) {
         assertEquals(sch.getServiceComponentName(), State.DISABLED,
           sch.getState());
+      }
     }
   }
-  
+
   @Test
   public void testStateCommandsWithAlertsGeneration() throws AmbariException, InterruptedException,
           InvalidStateTransitionException {
     Clusters clusters = injector.getInstance(Clusters.class);
-    
+
     clusters.addHost(hostname1);
     setOsFamily(clusters.getHost(hostname1), "redhat", "6.3");
     clusters.getHost(hostname1).persist();
-    
+
     clusters.addHost(hostname2);
     setOsFamily(clusters.getHost(hostname2), "redhat", "6.3");
     clusters.getHost(hostname2).persist();
     clusters.addCluster(clusterName);
-    
+
     Cluster cluster = clusters.getCluster(clusterName);
     cluster.setDesiredStackVersion(new StackId("HDP-2.0.7"));
     Set<String> hostNames = new HashSet<String>(){{
       add(hostname1);
       add(hostname2);
     }};
-    
+
     clusters.mapHostsToCluster(hostNames, clusterName);
-    
+
     Service hdfs = cluster.addService(serviceName);
-    Service nagios = cluster.addService("NAGIOS");
-    
     hdfs.persist();
-    nagios.persist();
-    
+
     hdfs.addServiceComponent(Role.DATANODE.name()).persist();
     hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
     hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
     hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
     hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
     hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
-    nagios.addServiceComponent(Role.NAGIOS_SERVER.name()).persist();
-    nagios.getServiceComponent(Role.NAGIOS_SERVER.name()).addServiceComponentHost(hostname1).persist();
 
     hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
     hdfs.getServiceComponent(Role.NAMENODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
     hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
-    nagios.getServiceComponent(Role.NAGIOS_SERVER.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
-    
 
     Alert alert = new Alert("datanode_madeup", null, "HDFS", "DATANODE",
      hostname1, AlertState.CRITICAL);
+
     cluster.addAlerts(Collections.singleton(alert));
-        
+
     ActionQueue aq = new ActionQueue();
     ActionManager am = mock(ActionManager.class);
     HeartbeatMonitor hm = new HeartbeatMonitor(clusters, aq, am,
@@ -587,21 +582,11 @@ public class TestHeartbeatMonitor {
     handler.handleHeartBeat(hb);
 
     List<StatusCommand> cmds = hm.generateStatusCommands(hostname1);
-    assertEquals("HeartbeatMonitor should generate StatusCommands for host1", 4, cmds.size());
+    assertEquals("HeartbeatMonitor should generate StatusCommands for host1",
+        3, cmds.size());
     assertEquals("HDFS", cmds.get(0).getServiceName());
 
-    boolean  containsNAGIOSStatus = false;
-    for (StatusCommand cmd : cmds) {
-      if (cmd.getComponentName().equals(Role.NAGIOS_SERVER.name())) {
-        containsNAGIOSStatus = true;
-        assertTrue(cmd.getClass().equals(NagiosAlertCommand.class));
-        assertEquals(1, ((NagiosAlertCommand) cmd).getAlerts().size());
-      }
-      
-    }
-    assertTrue(containsNAGIOSStatus);
-    
     cmds = hm.generateStatusCommands(hostname2);
     assertTrue("HeartbeatMonitor should not generate StatusCommands for host2 because it has no services", cmds.isEmpty());
-  }  
+  }
 }


[02/17] ambari git commit: AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/2.0.7/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/role_command_order.json b/ambari-server/src/test/resources/stacks/HDP/2.0.7/role_command_order.json
index c45ba07..1404ef6 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/role_command_order.json
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
@@ -13,12 +11,6 @@
     "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
@@ -48,8 +40,7 @@
     "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
     "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
     "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
-    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
-    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
     "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
   },
   "_comment" : "GLUSTERFS-specific dependencies",
@@ -71,8 +62,6 @@
     "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -93,7 +82,6 @@
   "namenode_optional_ha": {
     "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
     "ZKFC-START": ["NAMENODE-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",

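Worth noting in the upgrade section above: the chain is re-linked rather than truncated, so GANGLIA_SERVER-UPGRADE inherits SQOOP-UPGRADE, the blocker that NAGIOS_SERVER-UPGRADE used to hold. A generic sketch of that splice on a {blocked: [blockers]} map; the function name is ours.

def splice_out(deps, node):
    """Remove `node` from a {blocked: [blockers]} map, re-attaching
    its blockers to anything that was blocked on it."""
    inherited = deps.pop(node, [])
    for blocked, blockers in deps.items():
        if node in blockers:
            deps[blocked] = [b for b in blockers if b != node] + inherited
    return deps

deps = {
    'NAGIOS_SERVER-UPGRADE': ['SQOOP-UPGRADE'],
    'GANGLIA_SERVER-UPGRADE': ['NAGIOS_SERVER-UPGRADE'],
    'GANGLIA_MONITOR-UPGRADE': ['GANGLIA_SERVER-UPGRADE'],
}
splice_out(deps, 'NAGIOS_SERVER-UPGRADE')
assert deps['GANGLIA_SERVER-UPGRADE'] == ['SQOOP-UPGRADE']
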
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/NAGIOS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/NAGIOS/metainfo.xml
deleted file mode 100644
index f9cd4ed..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,136 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-           <name>NAGIOS_SERVER</name>
-           <category>MASTER</category>
-           <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>OOZIE/OOZIE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>TEZ/TEZ_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>HCATALOG/HCAT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-           <commandScript>
-             <script>scripts/nagios_server.py</script>
-             <scriptType>PYTHON</scriptType>
-             <timeout>600</timeout>
-           </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>perl</name>
-            </package>
-            <package>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <name>fping</name>
-            </package>
-            <package>
-              <name>hdp_mon_nagios_addons</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11</osFamily>
-          <packages>
-            <package>
-              <name>php5-json</name>
-            </package>
-            <package>
-              <name>apache2?mod_php*</name>
-            </package>
-            <package>
-              <name>php-curl</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5</osFamily>
-          <packages>
-            <package>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <configuration-dependencies>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/2.0.8/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/role_command_order.json b/ambari-server/src/test/resources/stacks/HDP/2.0.8/role_command_order.json
index c45ba07..1404ef6 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.8/role_command_order.json
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.8/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
@@ -13,12 +11,6 @@
     "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
@@ -48,8 +40,7 @@
     "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
     "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
     "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
-    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
-    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
     "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
   },
   "_comment" : "GLUSTERFS-specific dependencies",
@@ -71,8 +62,6 @@
     "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -93,7 +82,6 @@
   "namenode_optional_ha": {
     "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
     "ZKFC-START": ["NAMENODE-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/FAKENAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/FAKENAGIOS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/FAKENAGIOS/metainfo.xml
new file mode 100644
index 0000000..4edddbb
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/FAKENAGIOS/metainfo.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FAKENAGIOS</name>
+      <displayName>Monitoring Service</displayName>
+      <comment>A test service for testing out how the API handles a monitoring service</comment>
+      <version>1.0</version>
+      <monitoringService>true</monitoringService>
+      <components>
+        <component>
+          <displayName>Monitoring Client</displayName>
+          <name>FAKE_MONITORING_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/fake_monitoring.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+        <component>
+          <displayName>Monitoring Server</displayName>
+          <name>FAKE_MONITORING_SERVER</name>
+          <category>SERVER</category>
+          <commandScript>
+            <script>scripts/fakeios.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+    </service>
+  </services>
+</metainfo>

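The new <monitoringService>true</monitoringService> element is what marks a service as a monitoring service for the API tests. A hedged sketch of reading that flag, using only the standard library rather than Ambari's metainfo parser:

import xml.etree.ElementTree as ET

METAINFO = """
<metainfo>
  <services>
    <service>
      <name>FAKENAGIOS</name>
      <monitoringService>true</monitoringService>
    </service>
  </services>
</metainfo>
"""

def monitoring_services(xml_text):
    """Return names of services whose monitoringService flag is true."""
    root = ET.fromstring(xml_text)
    return [svc.findtext('name')
            for svc in root.findall('./services/service')
            if (svc.findtext('monitoringService') or 'false').strip() == 'true']

assert monitoring_services(METAINFO) == ['FAKENAGIOS']
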
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/2.1.1/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/role_command_order.json b/ambari-server/src/test/resources/stacks/HDP/2.1.1/role_command_order.json
index c45ba07..1404ef6 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.1.1/role_command_order.json
+++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
@@ -13,12 +11,6 @@
     "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
@@ -48,8 +40,7 @@
     "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
     "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
     "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
-    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
-    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
     "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
   },
   "_comment" : "GLUSTERFS-specific dependencies",
@@ -71,8 +62,6 @@
     "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -93,7 +82,6 @@
   "namenode_optional_ha": {
     "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
     "ZKFC-START": ["NAMENODE-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/OTHER/1.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/role_command_order.json b/ambari-server/src/test/resources/stacks/OTHER/1.0/role_command_order.json
index c45ba07..1404ef6 100644
--- a/ambari-server/src/test/resources/stacks/OTHER/1.0/role_command_order.json
+++ b/ambari-server/src/test/resources/stacks/OTHER/1.0/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
@@ -13,12 +11,6 @@
     "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
@@ -48,8 +40,7 @@
     "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
     "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
     "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
-    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
-    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
     "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
   },
   "_comment" : "GLUSTERFS-specific dependencies",
@@ -71,8 +62,6 @@
     "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -93,7 +82,6 @@
   "namenode_optional_ha": {
     "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
     "ZKFC-START": ["NAMENODE-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks_with_cycle/OTHER/1.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_cycle/OTHER/1.0/role_command_order.json b/ambari-server/src/test/resources/stacks_with_cycle/OTHER/1.0/role_command_order.json
index c45ba07..1404ef6 100644
--- a/ambari-server/src/test/resources/stacks_with_cycle/OTHER/1.0/role_command_order.json
+++ b/ambari-server/src/test/resources/stacks_with_cycle/OTHER/1.0/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
@@ -13,12 +11,6 @@
     "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
@@ -48,8 +40,7 @@
     "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
     "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
     "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
-    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
-    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
     "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
   },
   "_comment" : "GLUSTERFS-specific dependencies",
@@ -71,8 +62,6 @@
     "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -93,7 +82,6 @@
   "namenode_optional_ha": {
     "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
     "ZKFC-START": ["NAMENODE-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/test_api.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/test_api.sh b/ambari-server/src/test/resources/test_api.sh
index 93d378f..89dd9e2 100644
--- a/ambari-server/src/test/resources/test_api.sh
+++ b/ambari-server/src/test/resources/test_api.sh
@@ -20,7 +20,6 @@ curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/ZOOKEEPER
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HBASE
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/GANGLIA
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/NAGIOS
 
 curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "core-site", "tag": "version1", "properties" : { "fs.default.name" : "localhost:8020"}}}}' http://localhost:8080/api/v1/clusters/c1
 curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "core-site", "tag": "version2", "properties" : { "fs.default.name" : "localhost:8020"}}}}' http://localhost:8080/api/v1/clusters/c1
@@ -30,8 +29,6 @@ curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "global", "tag": "ve
 curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "mapred-site", "tag": "version1", "properties" : { "mapred.job.tracker" : "localhost:50300", "mapreduce.history.server.embedded": "false", "mapreduce.history.server.http.address": "localhost:51111"}}}}' http://localhost:8080/api/v1/clusters/c1
 curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "hbase-site", "tag": "version1", "properties" : { "hbase.rootdir" : "hdfs://localhost:8020/apps/hbase/", "hbase.cluster.distributed" : "true", "hbase.zookeeper.quorum": "localhost", "zookeeper.session.timeout": "60000" }}}}' http://localhost:8080/api/v1/clusters/c1
 curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "hbase-env", "tag": "version1", "properties" : { "hbase_hdfs_root_dir" : "/apps/hbase/"}}}}' http://localhost:8080/api/v1/clusters/c1
-curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "nagios-global", "tag": "version2", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password", "nagios_contact": "a\u0040b.c" }}}}' http://localhost:8080/api/v1/clusters/c1
-curl -i -X PUT -d '{"Clusters": {"desired_config": {"type": "nagios-global", "tag": "version1", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password"  }}}}' http://localhost:8080/api/v1/clusters/c1
 
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HDFS/components/NAMENODE
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HDFS/components/SECONDARY_NAMENODE
@@ -45,7 +42,6 @@ curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HBASE/componen
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HBASE/components/HBASE_CLIENT
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/GANGLIA/components/GANGLIA_SERVER
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/GANGLIA/components/GANGLIA_MONITOR
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/NAGIOS/components/NAGIOS_SERVER
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/NAMENODE
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/SECONDARY_NAMENODE
@@ -59,8 +55,6 @@ curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/HBASE_MASTER
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/HBASE_REGIONSERVER
 curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/HBASE_CLIENT
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/NAGIOS_SERVER
-curl -i -X PUT -d '{"Hosts": {"desired_config": {"type": "core-site", "tag": "version3", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password"  }}}}' http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST
 curl -i -X PUT  -d '{"ServiceInfo": {"state" : "INSTALLED"}}'   http://localhost:8080/api/v1/clusters/c1/services?state=INIT
 #curl -i -X PUT  -d '{"ServiceInfo": {"state" : "STARTED"}}'   http://localhost:8080/api/v1/clusters/c1/services?state=INSTALLED
 # http://localhost:8080/api/v1/clusters/c1/requests/2
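
For reference, these test scripts drive the Ambari REST API directly with curl. A
minimal Python sketch of the same call pattern follows; it assumes a reachable
server on localhost:8080 and an existing cluster "c1" (production servers would
additionally require authentication and an "X-Requested-By" header, which these
legacy test scripts omit):

import json
import urllib.request

BASE = "http://localhost:8080/api/v1/clusters/c1"

def api(method, path, body=None):
    # Serialize the request body the same way the curl -d payloads are built.
    data = json.dumps(body).encode() if body is not None else None
    req = urllib.request.Request(BASE + path, data=data, method=method)
    with urllib.request.urlopen(req) as resp:
        return resp.status

# Register a component, then set a desired config, mirroring the lines above.
api("POST", "/services/HDFS/components/NAMENODE")
api("PUT", "", {"Clusters": {"desired_config": {
    "type": "core-site", "tag": "version2",
    "properties": {"fs.default.name": "localhost:8020"}}}})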

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/test_multnode_api.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/test_multnode_api.sh b/ambari-server/src/test/resources/test_multnode_api.sh
index 8083654..ec27fb3 100644
--- a/ambari-server/src/test/resources/test_multnode_api.sh
+++ b/ambari-server/src/test/resources/test_multnode_api.sh
@@ -24,19 +24,15 @@ echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/MAPRED
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/ZOOKEEPER
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HBASE
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/GANGLIA
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/NAGIOS
 echo curl -i -X POST -d '{"type": "core-site", "tag": "version1", "properties" : { "fs.default.name" : "hdfs://'${AGENT_HOSTS[0]}':8020"}}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
 echo curl -i -X POST -d '{"type": "hdfs-site", "tag": "version1", "properties" : { "dfs.datanode.data.dir.perm" : "750"}}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
 echo curl -i -X POST -d '{"type": "global", "tag": "version1", "properties" : { "hbase_hdfs_root_dir" : "/apps/hbase/"}}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
 echo curl -i -X POST -d '{"type": "mapred-site", "tag": "version1", "properties" : { "mapred.job.tracker" : "'${AGENT_HOSTS[0]}':50300", "mapreduce.history.server.embedded": "false", "mapreduce.history.server.http.address": "'${AGENT_HOSTS[0]}':51111"}}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
 echo curl -i -X POST -d '{"type": "hbase-site", "tag": "version1", "properties" : { "hbase.rootdir" : "hdfs://'${AGENT_HOSTS[0]}':8020/apps/hbase/", "hbase.cluster.distributed" : "true", "hbase.zookeeper.quorum": "'${AGENT_HOSTS[0]}'", "zookeeper.session.timeout": "60000" }}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
 echo curl -i -X POST -d '{"type": "hbase-env", "tag": "version1", "properties" : { "hbase_hdfs_root_dir" : "/apps/hbase/"}}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
-echo curl -i -X POST -d '{"type": "nagios-global", "tag": "version2", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password", "nagios_contact": "a\u0040b.c" }}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
-echo curl -i -X POST -d '{"type": "nagios-global", "tag": "version1", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password"  }}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
 echo curl -i -X PUT -d '{"config": {"core-site": "version1", "hdfs-site": "version1", "global" : "version1" }}'  http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HDFS
 echo curl -i -X PUT -d '{"config": {"core-site": "version1", "mapred-site": "version1"}}'  http://$SERVER_HOST:8080/api/v1/clusters/c1/services/MAPREDUCE
 echo curl -i -X PUT -d '{"config": {"hbase-site": "version1", "hbase-env": "version1"}}'  http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HBASE
-echo curl -i -X PUT -d '{"config": {"nagios-global": "version2" }}'  http://$SERVER_HOST:8080/api/v1/clusters/c1/services/NAGIOS
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HDFS/components/NAMENODE
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HDFS/components/SECONDARY_NAMENODE
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HDFS/components/DATANODE
@@ -49,7 +45,6 @@ echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HBASE/
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HBASE/components/HBASE_CLIENT
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/GANGLIA/components/GANGLIA_SERVER
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/GANGLIA/components/GANGLIA_MONITOR
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/NAGIOS/components/NAGIOS_SERVER
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/NAMENODE
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/SECONDARY_NAMENODE
@@ -63,7 +58,6 @@ echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_H
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/HBASE_MASTER
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/HBASE_REGIONSERVER
 echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/HBASE_CLIENT
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/NAGIOS_SERVER
 echo 
 
 len=${#AGENT_HOSTS[@]}

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/contrib/addons/src/addOns/nagios/conf.d/hdp_mon_nagios_addons.conf
----------------------------------------------------------------------
diff --git a/contrib/addons/src/addOns/nagios/conf.d/hdp_mon_nagios_addons.conf b/contrib/addons/src/addOns/nagios/conf.d/hdp_mon_nagios_addons.conf
deleted file mode 100644
index fbaeb2a..0000000
--- a/contrib/addons/src/addOns/nagios/conf.d/hdp_mon_nagios_addons.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-Alias /ambarinagios /usr/share/hdp
-<Directory /usr/share/hdp>
-  Options None
-  AllowOverride None
-  Order allow,deny
-  Allow from all
-</Directory>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/contrib/addons/src/addOns/nagios/plugins/check_aggregate.php
----------------------------------------------------------------------
diff --git a/contrib/addons/src/addOns/nagios/plugins/check_aggregate.php b/contrib/addons/src/addOns/nagios/plugins/check_aggregate.php
deleted file mode 100644
index 8eaee53..0000000
--- a/contrib/addons/src/addOns/nagios/plugins/check_aggregate.php
+++ /dev/null
@@ -1,195 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  $options = getopt ("f:s:n:w:c:t:");
-  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $status_file=$options['f'];
-  $status_code=$options['s'];
-  $type=$options['t'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  if ($type == "service" && !array_key_exists('n', $options)) {
-    echo "Service description not provided -n option\n";
-    exit(3);
-  } 
-  if ($type == "service") {
-    $service_name=$options['n'];
-    /* echo "DESC: " . $service_name . "\n"; */
-  }
-  
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  $counts;
-  if ($type == "service") {
-    $counts=query_alert_count($status_file_content, $service_name, $status_code);
-  } else {
-    $counts=query_host_count($status_file_content, $status_code);
-  }
-
-  if ($counts['total'] == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($counts['actual']/$counts['total'])*100;
-  }
-  if ($percent >= $crit) {
-    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (1);
-  }
-  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-  exit(0);
-
-
-  # Functions 
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content, $status_code) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $total_hosts = 0;
-    $hosts = 0;
-    foreach ($matches[0] as $object) {
-      $total_hosts++;
-      if (getParameter($object, "current_state") == $status_code) {
-        $hosts++;
-      } 
-    }
-    $hostcounts_object['total'] = $total_hosts;
-    $hostcounts_object['actual'] = $hosts;
-    return $hostcounts_object;
-  }
-
-  /* Query Alert counts */
-  function query_alert_count ($status_file_content, $service_name, $status_code) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $alertcounts_objects = array ();
-    $total_alerts=0;
-    $alerts=0;
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "service_description") == $service_name) {
-        $total_alerts++;
-        if (getParameter($object, "current_state") >= $status_code) {
-          $alerts++;
-        } 
-      }
-    }
-    $alertcounts_objects['total'] = $total_alerts;
-    $alertcounts_objects['actual'] = $alerts;
-    return $alertcounts_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "NAMENODE":
-        $pieces[0] = "HDFS";
-        break; 
-      case "JOBTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break; 
-      case "HBASEMASTER":
-        $pieces[0] = "HBASE";
-        break; 
-      case "SYSTEM":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-        break; 
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-?>
-
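
The plugin deleted above implements the standard Nagios aggregate-check pattern:
parse status.dat, count objects in a given state, and convert the affected
percentage into OK/WARNING/CRITICAL exit codes (0/1/2). A condensed Python
sketch of its query_host_count() logic, with an assumed status.dat path:

import re

def host_counts(status_text, status_code):
    total = actual = 0
    # Each "hoststatus { ... }" block describes one monitored host.
    for block in re.findall(r"hoststatus \{([\S\s]*?)\}", status_text):
        total += 1
        m = re.search(r"\scurrent_state[\s=]*(\S+)", block)
        if m and m.group(1) == status_code:
            actual += 1
    return {"total": total, "actual": actual}

# Path is illustrative; the real location depends on the Nagios install.
with open("/var/nagios/status.dat") as f:
    print(host_counts(f.read(), "1"))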

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/contrib/addons/src/addOns/nagios/plugins/check_hadoop.sh
----------------------------------------------------------------------
diff --git a/contrib/addons/src/addOns/nagios/plugins/check_hadoop.sh b/contrib/addons/src/addOns/nagios/plugins/check_hadoop.sh
deleted file mode 100644
index 8bbc850..0000000
--- a/contrib/addons/src/addOns/nagios/plugins/check_hadoop.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-user=""
-secure="false"
-keytab=""
-kinit_path="/usr/kerberos/bin/kinit"
-while getopts ":u:k:s" opt; do
-  case $opt in
-    u)
-      user=$OPTARG;
-      ;;
-    k)
-      keytab=$OPTARG;
-      ;;
-    s)
-      secure="true";
-      ;;
-    \?)
-      echo "Invalid option: -$OPTARG" >&2
-      exit 3
-      ;;
-    :)
-      echo "UNKNOWNOption -$OPTARG requires an argument." >&2
-      exit 3
-      ;;
-  esac
-done
-
-outfile="/tmp/nagios-hadoop-check.out"
-curtime=`date +"%F-%H-%M-%S"`
-fname="nagios-hadoop-check-${curtime}"
-
-if [[ "$user" == "" ]]; then
-  echo "INVALID: user argument not specified";
-  exit 3;
-fi
-if [[ "$keytab" == "" ]]; then 
-  keytab="/homes/$user/$user.headless.keytab"
-fi
-
-if [[ ! -f "$kinit_path" ]]; then
-  kinit_path="kinit"
-fi
-
-if [[ "$secure" == "true" ]]; then
-  sudo -u $user -i "$kinit_path -kt $keytab $user" > ${outfile} 2>&1
-fi
-
-sudo -u $user -i "hadoop dfs -copyFromLocal /etc/passwd ${fname}.input " > ${outfile} 2>&1
-if [[ "$?" -ne "0" ]]; then 
-  echo "CRITICAL: Error copying file to HDFS. See error output in ${outfile} on nagios server";
-  exit 2; 
-fi
-sudo -u $user -i "hadoop dfs -ls" > ${outfile} 2>&1
-if [[ "$?" -ne "0" ]]; then 
-  echo "CRITICAL: Error listing HDFS files. See error output in ${outfile} on nagios server";
-  exit 2; 
-fi
-sudo -u $user -i "hadoop jar /usr/share/hadoop/hadoop-examples-*.jar wordcount ${fname}.input ${fname}.out" >> ${outfile} 2>&1
-if [[ "$?" -ne "0" ]]; then 
-  echo "CRITICAL: Error running M/R job. See error output in ${outfile} on nagios server";
-  exit 2; 
-fi
-sudo -u $user -i "hadoop fs -rmr -skipTrash ${fname}.out" >> ${outfile} 2>&1
-if [[ "$?" -ne "0" ]]; then 
-  echo "CRITICAL: Error removing M/R job output. See error output in ${outfile} on nagios server";
-  exit 2; 
-fi
-sudo -u $user -i "hadoop fs -rm -skipTrash ${fname}.input" >> ${outfile} 2>&1
-if [[ "$?" -ne "0" ]]; then 
-  echo "CRITICAL: Error removing M/R job input. See error output in ${outfile} on nagios server";
-  exit 2; 
-fi
-
-echo "OK: M/R WordCount Job ran successfully"
-exit 0;
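
This check boils down to "run a known-good HDFS/MapReduce workload and map any
failure to a Nagios CRITICAL (exit 2)". A compact Python sketch of the same
pattern, with placeholder paths and without the kinit/sudo handling:

import subprocess
import sys

def run_or_critical(cmd, what):
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        print(f"CRITICAL: {what} failed. {result.stderr.strip()}")
        sys.exit(2)

run_or_critical("hadoop fs -copyFromLocal /etc/passwd /tmp/nagios-check.input",
                "copying a file to HDFS")
run_or_critical("hadoop fs -rm -skipTrash /tmp/nagios-check.input",
                "removing the test file")
print("OK: HDFS smoke test passed")
sys.exit(0)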

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/contrib/addons/src/addOns/nagios/plugins/check_hbase.sh
----------------------------------------------------------------------
diff --git a/contrib/addons/src/addOns/nagios/plugins/check_hbase.sh b/contrib/addons/src/addOns/nagios/plugins/check_hbase.sh
deleted file mode 100644
index 2bcf855..0000000
--- a/contrib/addons/src/addOns/nagios/plugins/check_hbase.sh
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-user=""
-secure="false"
-keytab=""
-kinit_path="/usr/kerberos/bin/kinit"
-while getopts ":u:k:s" opt; do
-  case $opt in
-    u)
-      user=$OPTARG;
-      ;;
-    k)
-      keytab=$OPTARG;
-      ;;
-    s)
-      secure="true";
-      ;;
-    \?)
-      echo "Invalid option: -$OPTARG" >&2
-      exit 3
-      ;;
-    :)
-      echo "UNKNOWNOption -$OPTARG requires an argument." >&2
-      exit 3
-      ;;
-  esac
-done
-
-outfile="/tmp/nagios-hbase-check.out"
-curtime=`date +"%F-%H-%M-%S"`
-fname="nagios-hbase-check-${curtime}"
-
-if [[ "$user" == "" ]]; then
-  echo "INVALID: user argument not specified";
-  exit 3;
-fi
-if [[ "$keytab" == "" ]]; then 
-  keytab="/homes/$user/$user.headless.keytab"
-fi
-
-if [[ ! -f "$kinit_path" ]]; then
-  kinit_path="kinit"
-fi
-
-if [[ "$secure" == "true" ]]; then
-  sudo -u $user -i "$kinit_path -kt $keytab $user" > ${outfile} 2>&1
-fi
-
-output=`sudo -u $user -i "echo status | /usr/bin/hbase --config /etc/hbase shell"`
-(IFS='')
-tmpOutput=$(echo $output | grep -v '0 servers')
-if [[ "$?" -ne "0" ]]; then 
-  echo "CRITICAL: No region servers are running";
-  exit 2; 
-fi
-sudo -u $user -i "echo disable \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
-sudo -u $user -i "echo drop \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
-sudo -u $user -i "echo create \'nagios_test_table\', \'family\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
-sudo -u $user -i "echo put \'nagios_test_table\', \'row01\', \'family:col01\', \'value1\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
-output=`sudo -u $user -i "echo scan \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell"`
-(IFS='')
-tmpOutput=$(echo $output | grep -v '1 row(s) in')
-if [[ "$?" -ne "1" ]]; then 
-  echo "CRITICAL: Error populating HBase table";
-  exit 2; 
-fi
-sudo -u $user -i "echo disable \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
-sudo -u $user -i "echo drop \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
-
-echo "OK: HBase transaction completed successfully"
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/contrib/addons/src/addOns/nagios/plugins/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/contrib/addons/src/addOns/nagios/plugins/check_hdfs_blocks.php b/contrib/addons/src/addOns/nagios/plugins/check_hdfs_blocks.php
deleted file mode 100644
index c20d406..0000000
--- a/contrib/addons/src/addOns/nagios/plugins/check_hdfs_blocks.php
+++ /dev/null
@@ -1,72 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks whether the corrupt or missing blocks % is > threshold.
- * check_jmx -H hostaddress -p port -w 1% -c 1%
- */
-
-  $options = getopt ("h:p:w:c:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-
-  /* Get the json document */
-  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemMetrics");
-  $json_array = json_decode($json_string, true);
-  $m_percent = 0;
-  $c_percent = 0;
-  $object = $json_array['beans'][0];
-  $missing_blocks = $object['MissingBlocks'];
-  $corrupt_blocks = $object['CorruptBlocks'];
-  $total_blocks = $object['BlocksTotal'];
-  if($total_blocks == 0) {
-    $m_percent = 0;
-    $c_percent = 0;
-  } else {
-    $m_percent = ($missing_blocks/$total_blocks)*100;
-    $c_percent = ($corrupt_blocks/$total_blocks)*100;
-  }
-  $out_msg = "corrupt_blocks:<" . $corrupt_blocks . 
-             ">, missing_blocks:<" . $missing_blocks . 
-             ">, total_blocks:<" . $total_blocks . ">";
-  
-  if ($m_percent > $crit || $c_percent > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($m_percent > $warn || $c_percent > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%>\n";
-  }
-?>
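
This plugin, like the capacity and RPC-latency checks that follow, is a thin
wrapper around Hadoop's /jmx endpoint: fetch a bean, derive a percentage, and
compare it against the warn/crit thresholds. The same flow in Python (host,
port, and thresholds are placeholders; 50070 is the default NameNode HTTP port):

import json
import sys
import urllib.request

host, port, warn, crit = "localhost", 50070, 1.0, 1.0
url = (f"http://{host}:{port}/jmx"
       "?qry=Hadoop:service=NameNode,name=FSNamesystemMetrics")
bean = json.load(urllib.request.urlopen(url))["beans"][0]

total = bean["BlocksTotal"]
# The original checks missing and corrupt percentages separately;
# summing them here is a simplification.
bad = bean["MissingBlocks"] + bean["CorruptBlocks"]
percent = (bad / total) * 100 if total else 0.0

msg = f"bad_blocks:<{bad}>, total_blocks:<{total}>"
if percent > crit:
    print("CRITICAL: " + msg)
    sys.exit(2)
if percent > warn:
    print("WARNING: " + msg)
    sys.exit(1)
print("OK: " + msg)
sys.exit(0)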

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/contrib/addons/src/addOns/nagios/plugins/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/contrib/addons/src/addOns/nagios/plugins/check_hdfs_capacity.php b/contrib/addons/src/addOns/nagios/plugins/check_hdfs_capacity.php
deleted file mode 100644
index a2686c5..0000000
--- a/contrib/addons/src/addOns/nagios/plugins/check_hdfs_capacity.php
+++ /dev/null
@@ -1,68 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks whether the % HDFS capacity used is >= the warn and critical limits.
- * check_jmx -H hostaddress -p port -w 1 -c 1
- */
-
-  $options = getopt ("h:p:w:c:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-
-  /* Get the json document */
-  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState");
-  $json_array = json_decode($json_string, true);
-  $percent = 0;
-  $object = $json_array['beans'][0];
-  $CapacityUsed = $object['CapacityUsed'];
-  $CapacityRemaining = $object['CapacityRemaining'];
-  $CapacityTotal = $CapacityUsed + $CapacityRemaining;
-  if($CapacityTotal == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($CapacityUsed/$CapacityTotal)*100;
-  }
-  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) . 
-             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
-  
-  if ($percent >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%>\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/contrib/addons/src/addOns/nagios/plugins/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/contrib/addons/src/addOns/nagios/plugins/check_hive_metastore_status.sh b/contrib/addons/src/addOns/nagios/plugins/check_hive_metastore_status.sh
deleted file mode 100644
index aa7d193..0000000
--- a/contrib/addons/src/addOns/nagios/plugins/check_hive_metastore_status.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# The URI is of the form thrift://<hostname>:<port>
-HOST=$1
-PORT=$2
-HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
-out=`hcat $HCAT_URL -e "show databases" 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
-  exit 2;
-fi
-echo "OK: Hive Metastore status OK";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/contrib/addons/src/addOns/nagios/plugins/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/contrib/addons/src/addOns/nagios/plugins/check_name_dir_status.php b/contrib/addons/src/addOns/nagios/plugins/check_name_dir_status.php
deleted file mode 100644
index 7e44ea7..0000000
--- a/contrib/addons/src/addOns/nagios/plugins/check_name_dir_status.php
+++ /dev/null
@@ -1,59 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the namenode, gets the jmx-json document,
- * and checks NameDirStatuses to find any offline (failed) directories.
- * check_jmx -H hostaddress -p port
- */
-
-  $options = getopt ("h:p:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-
-  /* Get the json document */
-  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo");
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if ($object['NameDirStatuses'] == "") {
-    echo "UNKNOWN: NameNode directory status not available via http://<nn_host>:port/jmx url" . "\n";
-    exit(3);
-  }
-  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
-  $failed_dir_count = count($NameDirStatuses['failed']);
-  $out_msg = "CRITICAL: Offline NameNode directories: ";
-  if ($failed_dir_count > 0) {
-    foreach ($NameDirStatuses['failed'] as $key => $value) {
-      $out_msg = $out_msg . $key . ":" . $value . ", ";
-    }
-    echo $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: All NameNode directories are active" . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port\n";
-  }
-?>
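
One wrinkle here: NameDirStatuses is itself a JSON document embedded as a string
inside the JMX response, so it needs a second decode. In Python (host and port
assumed, as above):

import json
import urllib.request

url = ("http://localhost:50070/jmx"
       "?qry=Hadoop:service=NameNode,name=NameNodeInfo")
bean = json.load(urllib.request.urlopen(url))["beans"][0]
statuses = json.loads(bean["NameDirStatuses"])  # nested JSON string
failed = statuses.get("failed", {})
print("offline directories:", ", ".join(failed) if failed else "none")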

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/contrib/addons/src/addOns/nagios/plugins/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/contrib/addons/src/addOns/nagios/plugins/check_oozie_status.sh b/contrib/addons/src/addOns/nagios/plugins/check_oozie_status.sh
deleted file mode 100644
index e943bbe..0000000
--- a/contrib/addons/src/addOns/nagios/plugins/check_oozie_status.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# OOZIE_URL is of the form http://<hostname>:<port>/oozie
-# OOZIE_URL: http://host1.localdomain:11000/oozie
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-OOZIE_URL="http://$HOST:$PORT/oozie"
-export JAVA_HOME=$JAVA_HOME
-out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing Oozie Server status [$out]";
-  exit 2;
-fi
-echo "OK: Oozie Server status [$out]";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/contrib/addons/src/addOns/nagios/plugins/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/contrib/addons/src/addOns/nagios/plugins/check_rpcq_latency.php b/contrib/addons/src/addOns/nagios/plugins/check_rpcq_latency.php
deleted file mode 100644
index 9ec28f7..0000000
--- a/contrib/addons/src/addOns/nagios/plugins/check_rpcq_latency.php
+++ /dev/null
@@ -1,67 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node and gets the jmx-json document.
- * It checks the RPC wait time in the queue, RpcQueueTime_avg_time.
- * check_rpcq_latency -h hostaddress -p port -t ServiceName -w 1 -c 1
- * Warning and Critical values are in seconds
- * Service Name = JobTracker, NameNode
- */
-
-  $options = getopt ("h:p:w:c:n:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w']; 
-  $crit=$options['c']; 
-
-  /* Get the json document */
-  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*");
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  
-  $RpcQueueTime_avg_time = $object['RpcQueueTime_avg_time'];
-  $RpcProcessingTime_avg_time = $object['RpcProcessingTime_avg_time'];
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time . 
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time . 
-             "> Secs";
-  
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode> -w <warn_in_sec> -c <crit_in_sec>\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/contrib/addons/src/addOns/nagios/plugins/check_webui.sh
----------------------------------------------------------------------
diff --git a/contrib/addons/src/addOns/nagios/plugins/check_webui.sh b/contrib/addons/src/addOns/nagios/plugins/check_webui.sh
deleted file mode 100644
index 57b9239..0000000
--- a/contrib/addons/src/addOns/nagios/plugins/check_webui.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-checkurl () {
-  url=$1
-  /usr/bin/wget -q $url -O /dev/null
-  echo $?
-}
-
-service=$1
-host=$2
-
-if [[ -z "$service" || -z "$host" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
-  exit 3;
-fi
-
-case "$service" in
-
-jobtracker) 
-    jtweburl="http://$host:50030"
-    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
-      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
-      exit 1;
-    fi
-    ;;
-namenode)
-    nnweburl="http://$host:50070"
-    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
-      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
-      exit 1;
-    fi
-    ;;
-jobhistory)
-    jhweburl="http://$host:51111/jobhistoryhome.jsp"
-    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
-      echo "WARNING: JobHistory Web UI not accessible : $jhweburl";
-      exit 1;
-    fi
-    ;;
-hbase)
-    hbaseweburl="http://$host:60010/master-status"
-    jhweburl="http://domU-12-31-39-16-DC-FB.compute-1.internal:51111/jobhistoryhome.jsp"
-    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
-      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
-      exit 1;
-    fi
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode]"
-   exit 3
-   ;;
-esac
-
-echo "OK: Successfully accessed $service Web UI"
-exit 0;
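
The web-UI check reduces to "is this URL fetchable", with failures reported at
WARNING severity (exit 1) rather than CRITICAL. A Python rendering of the same
idea, with a placeholder URL:

import sys
import urllib.error
import urllib.request

url = "http://localhost:50070"  # e.g. the NameNode web UI
try:
    urllib.request.urlopen(url, timeout=10)
except (urllib.error.URLError, OSError):
    print(f"WARNING: Web UI not accessible : {url}")
    sys.exit(1)
print(f"OK: Successfully accessed Web UI at {url}")
sys.exit(0)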

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/contrib/addons/src/addOns/nagios/plugins/sys_logger.py
----------------------------------------------------------------------
diff --git a/contrib/addons/src/addOns/nagios/plugins/sys_logger.py b/contrib/addons/src/addOns/nagios/plugins/sys_logger.py
deleted file mode 100644
index 2e353f4..0000000
--- a/contrib/addons/src/addOns/nagios/plugins/sys_logger.py
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/usr/bin/python
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import sys
-import syslog
-
-# dictionary of state->severity mappings
-severities = {'UP':'OK', 'DOWN':'Critical', 'UNREACHABLE':'Critical', 'OK':'OK',
-              'WARNING':'Warning', 'UNKNOWN':'Warning', 'CRITICAL':'Critical'}
-
-# List of services which can result in events at the Degraded severity
-degraded_alert_services = ['HBASEMASTER::HBaseMaster CPU utilization',
-                           'HDFS::Namenode RPC Latency',
-                           'MAPREDUCE::JobTracker RPC Latency',
-                           'JOBTRACKER::Jobtracker CPU utilization']
-
-# List of services which can result in events at the Fatal severity
-fatal_alert_services = ['NAMENODE::Namenode Process down',
-                        'NAMENODE::NameNode process']
-
-# dictionary of service->msg_id mappings
-msg_ids = {'Host::Ping':'host_down',
-           'HBASEMASTER::HBaseMaster CPU utilization':'master_cpu_utilization',
-           'HDFS::HDFS Capacity utilization':'hdfs_percent_capacity',
-           'HDFS::Corrupt/Missing blocks':'hdfs_block',
-           'NAMENODE::Namenode Edit logs directory status':'namenode_edit_log_write',
-           'HDFS::Percent DataNodes down':'datanode_down',
-           'DATANODE::Process down':'datanode_process_down',
-           'HDFS::Percent DataNodes storage full':'datanodes_percent_storage_full',
-           'NAMENODE::Namenode Process down':'namenode_process_down',
-           'HDFS::Namenode RPC Latency':'namenode_rpc_latency',
-           'DATANODE::Storage full':'datanodes_storage_full',
-           'JOBTRACKER::Jobtracker Process down':'jobtracker_process_down',
-           'MAPREDUCE::JobTracker RPC Latency':'jobtracker_rpc_latency',
-           'MAPREDUCE::Percent TaskTrackers down':'tasktrackers_down',
-           'TASKTRACKER::Process down':'tasktracker_process_down',
-           'HBASEMASTER::HBaseMaster Process down':'hbasemaster_process_down',
-           'REGIONSERVER::Process down':'regionserver_process_down',
-           'HBASE::Percent region servers down':'regionservers_down',
-           'HIVE-METASTORE::HIVE-METASTORE status check':'hive_metastore_process_down',
-           'ZOOKEEPER::Percent zookeeper servers down':'zookeepers_down',
-           'ZKSERVERS::ZKSERVERS Process down':'zookeeper_process_down',
-           'OOZIE::Oozie status check':'oozie_down',
-           'TEMPLETON::Templeton status check':'templeton_down',
-           'PUPPET::Puppet agent down':'puppet_down',
-           'NAGIOS::Nagios status log staleness':'nagios_status_log_stale',
-           'GANGLIA::Ganglia [gmetad] Process down':'ganglia_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for jobtracker':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for namenode':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for slaves':'ganglia_collector_process_down',
-           'NAMENODE::Secondary Namenode Process down':'secondary_namenode_process_down',
-           'JOBTRACKER::Jobtracker CPU utilization':'jobtracker_cpu_utilization',
-           'HBASEMASTER::HBase Web UI down':'hbase_ui_down',
-           'NAMENODE::Namenode Web UI down':'namenode_ui_down',
-           'JOBTRACKER::JobHistory Web UI down':'jobhistory_ui_down',
-           'JOBTRACKER::JobTracker Web UI down':'jobtracker_ui_down',
-
-           'HBASEMASTER::HBase Master CPU utilization':'master_cpu_utilization',
-           'HDFS::HDFS capacity utilization':'hdfs_percent_capacity',
-           'NAMENODE::NameNode edit logs directory status':'namenode_edit_log_write',
-           'DATANODE::DataNode process down':'datanode_process_down',
-           'NAMENODE::NameNode process down':'namenode_process_down',
-           'HDFS::NameNode RPC latency':'namenode_rpc_latency',
-           'DATANODE::DataNode storage full':'datanodes_storage_full',
-           'JOBTRACKER::JobTracker process down':'jobtracker_process_down',
-           'MAPREDUCE::JobTracker RPC latency':'jobtracker_rpc_latency',
-           'TASKTRACKER::TaskTracker process down':'tasktracker_process_down',
-           'HBASEMASTER::HBase Master process down':'hbasemaster_process_down',
-           'REGIONSERVER::RegionServer process down':'regionserver_process_down',
-           'HBASE::Percent RegionServers down':'regionservers_down',
-           'HIVE-METASTORE::Hive Metastore status check':'hive_metastore_process_down',
-           'HIVE-METASTORE::Hive Metastore process':'hive_metastore_process_down',
-           'ZOOKEEPER::Percent ZooKeeper Servers down':'zookeepers_down',
-           'ZOOKEEPER::ZooKeeper Server process down':'zookeeper_process_down',
-           'OOZIE::Oozie Server status check':'oozie_down',
-           'WEBHCAT::WebHCat Server status check':'templeton_down',
-           'GANGLIA::Ganglia [gmetad] process down':'ganglia_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for HBase Master':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for JobTracker':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for NameNode':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for slaves':'ganglia_collector_process_down',
-           'NAMENODE::Secondary NameNode process down':'secondary_namenode_process_down',
-           'JOBTRACKER::JobTracker CPU utilization':'jobtracker_cpu_utilization',
-           'HBASEMASTER::HBase Master Web UI down':'hbase_ui_down',
-           'NAMENODE::NameNode Web UI down':'namenode_ui_down',
-           'Oozie status check':'oozie_down',
-           'WEBHCAT::WebHcat status check':'templeton_down',
-
-           # Ambari Nagios service check descriptions
-           'DATANODE::DataNode process':'datanode_process',
-           'NAMENODE::NameNode process':'namenode_process',
-           'NAMENODE::Secondary NameNode process':'secondary_namenode_process',
-           'JOURNALNODE::JournalNode process':'journalnode_process',
-           'ZOOKEEPER::ZooKeeper Server process':'zookeeper_process_down',
-           'JOBTRACKER::JobTracker process':'jobtracker_process',
-           'TASKTRACKER::TaskTracker process':'tasktracker_process',
-           'GANGLIA::Ganglia Server process':'ganglia_server_process',
-           'GANGLIA::Ganglia Monitor process for Slaves':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for NameNode':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for JobTracker':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for HBase Master':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for ResourceManager':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for HistoryServer':'ganglia_monitor_process',
-           'HBASEMASTER::HBase Master process':'hbase_master_process',
-           'HBASE::Percent RegionServers live':'regionservers_down',
-           'REGIONSERVER::RegionServer process':'regionserver_process',
-           'NAGIOS::Nagios status log freshness':'nagios_process',
-           'FLUME::Flume Agent process':'flume_agent_process',
-           'OOZIE::Oozie Server status':'oozie_down',
-           'HIVE-METASTORE::Hive Metastore status':'hive_metastore_process',
-           'WEBHCAT::WebHCat Server status':'webhcat_down',
-           'RESOURCEMANAGER::ResourceManager process':'resourcemanager_process_down',
-           'RESOURCEMANAGER::ResourceManager RPC latency':'resourcemanager_rpc_latency',
-           'RESOURCEMANAGER::ResourceManager CPU utilization':'resourcemanager_cpu_utilization',
-           'RESOURCEMANAGER::ResourceManager Web UI':'recourcemanager_ui',
-           'NODEMANAGER::NodeManager process':'nodemanager_process_down',
-           'NODEMANAGER::NodeManager health':'nodemanager_health',
-           'NODEMANAGER::Percent NodeManagers live':'nodemanagers_down',
-           'APP_TIMELINE_SERVER::App Timeline Server process':'timelineserver_process',
-           'JOBHISTORY::HistoryServer RPC latency':'historyserver_rpc_latency',
-           'JOBHISTORY::HistoryServer CPU utilization':'historyserver_cpu_utilization',
-           'JOBHISTORY::HistoryServer Web UI':'historyserver_ui',
-           'JOBHISTORY::HistoryServer process':'historyserver_process'}
-
-# Determine the severity of the TVI alert based on the Nagios alert state.
-def determine_severity(state, service):
-  if severities.has_key(state):
-    severity = severities[state]
-  else: severity = 'Warning'
-
-  # For some alerts, warning should be converted to Degraded
-  if severity == 'Warning' and service in degraded_alert_services:
-    severity = 'Degraded'
-  elif severity != 'OK' and service in fatal_alert_services:
-    severity = 'Fatal'
-
-  return severity
-
-
-# Determine the msg id for the TVI alert from based on the service which generates the Nagios alert.
-# The msg id is used to correlate a log msg to a TVI rule.
-def determine_msg_id(service, severity):
-  for k, v in msg_ids.iteritems():
-    if(k in service):
-      msg_id = v
-      if severity == 'OK':
-        msg_id = '{0}_ok'.format(msg_id)
-      return msg_id
-  return 'HADOOP_UNKNOWN_MSG'
-
-
-# Determine the domain.  Currently the domain is always 'Hadoop'.
-def determine_domain():
-  return 'Hadoop'
-
-
-# log the TVI msg to the syslog
-def log_tvi_msg(msg):
-  syslog.openlog('nagios', syslog.LOG_PID)
-  syslog.syslog(msg)
-
-
-# generate a tvi log msg from a Hadoop alert
-def generate_tvi_log_msg(alert_type, attempt, state, service, msg):
-  # Determine the TVI msg contents
-  severity = determine_severity(state, service)  # The TVI alert severity.
-  domain   = determine_domain()                  # The domain specified in the TVI alert.
-  msg_id   = determine_msg_id(service, severity) # The msg_id used to correlate to a TVI rule.
-
-  # Only log HARD alerts
-  if alert_type == 'HARD':
-    # Format and log msg
-    log_tvi_msg('{0}: {1}: {2}# {3}'.format(severity, domain, msg_id, msg))
-
-
-# main method which is called when invoked on the command line
-def main():
-  generate_tvi_log_msg(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
-
-
-# run the main method
-if __name__ == '__main__':
-  main()
-  sys.exit(0)
\ No newline at end of file
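
sys_logger.py is Python 2 (has_key, iteritems). Its core logic, which maps the
Nagios state to a severity, escalates it for certain services, and emits a
correlatable line to syslog, translates to Python 3 roughly as below; the
service lists are trimmed to an illustrative subset:

import syslog

SEVERITIES = {"UP": "OK", "DOWN": "Critical", "UNREACHABLE": "Critical",
              "OK": "OK", "WARNING": "Warning", "UNKNOWN": "Warning",
              "CRITICAL": "Critical"}
FATAL_SERVICES = {"NAMENODE::NameNode process"}

def determine_severity(state, service):
    severity = SEVERITIES.get(state, "Warning")
    # Any non-OK state on a fatal-listed service is escalated to Fatal.
    if severity != "OK" and service in FATAL_SERVICES:
        severity = "Fatal"
    return severity

def log_tvi_msg(msg):
    syslog.openlog("nagios", syslog.LOG_PID)
    syslog.syslog(msg)

sev = determine_severity("CRITICAL", "NAMENODE::NameNode process")
log_tvi_msg(f"{sev}: Hadoop: namenode_process# NameNode process is down")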


[05/17] ambari git commit: AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleCommandOrderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleCommandOrderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleCommandOrderTest.java
index 8095048..ff5539f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleCommandOrderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleCommandOrderTest.java
@@ -20,8 +20,8 @@ package org.apache.ambari.server.metadata;
 
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertFalse;
-import static junit.framework.Assert.assertTrue;
 import static junit.framework.Assert.assertNotNull;
+import static junit.framework.Assert.assertTrue;
 import static org.easymock.EasyMock.createMock;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
@@ -31,11 +31,11 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
 import junit.framework.Assert;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
@@ -52,7 +52,6 @@ import org.codehaus.jackson.annotate.JsonAutoDetect;
 import org.codehaus.jackson.annotate.JsonMethod;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.type.TypeReference;
-import org.easymock.IExpectationSetters;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -274,16 +273,16 @@ public class RoleCommandOrderTest {
 
     InputStream testJsonIS = getClass().getClassLoader().
             getResourceAsStream(TEST_RCO_DATA_FILE);
-    
+
     ObjectMapper mapper = new ObjectMapper();
     Map<String,Object> testData = mapper.readValue(testJsonIS,
         new TypeReference<Map<String,Object>>() {});
-        
+
     rco.addDependencies(testData);
 
     mapper.setVisibility(JsonMethod.ALL, JsonAutoDetect.Visibility.ANY);
     String dump = mapper.writeValueAsString(rco.getDependencies());
-    
+
     String expected = "{\"RoleCommandPair{role=SECONDARY_NAMENODE, " +
         "cmd=UPGRADE}\":[{\"role\":{\"name\":\"NAMENODE\"},\"cmd\":\"UPGRADE\"}]," +
         "\"RoleCommandPair{role=SECONDARY_NAMENODE, cmd=START}\":[{\"role\":{\"name\":\"NAMENODE\"}," +
@@ -295,8 +294,8 @@ public class RoleCommandOrderTest {
 
     assertEquals(expected, dump);
   }
-  
-  
+
+
   @Test
   public void testInitializeDefault() throws IOException {
     RoleCommandOrder rco = injector.getInstance(RoleCommandOrder.class);
@@ -313,9 +312,9 @@ public class RoleCommandOrderTest {
 
     replay(cluster);
     replay(hdfsService);
-    
+
     rco.initialize(cluster);
-    
+
     verify(cluster);
     verify(hdfsService);
   }
@@ -333,19 +332,20 @@ public class RoleCommandOrderTest {
     Map<String,ServiceComponent> hdfsComponents = Collections.singletonMap("NAMENODE", namenode);
     expect(hdfsService.getServiceComponents()).andReturn(hdfsComponents).anyTimes();
 
-    Service nagiosService = createMock(Service.class);
-    expect(cluster.getService("NAGIOS")).andReturn(nagiosService).atLeastOnce();
-    expect(nagiosService.getCluster()).andReturn(cluster).anyTimes();
+    Service hbaseService = createMock(Service.class);
+    expect(cluster.getService("HBASE")).andReturn(hbaseService).atLeastOnce();
+    expect(hbaseService.getCluster()).andReturn(cluster).anyTimes();
 
-    ServiceComponent nagiosServer = createMock(ServiceComponent.class);
-    expect(nagiosServer.getName()).andReturn("NAGIOS_SERVER").anyTimes();
+    ServiceComponent hbaseMaster = createMock(ServiceComponent.class);
+    expect(hbaseMaster.getName()).andReturn("HBASE_MASTER").anyTimes();
 
-    Map<String,ServiceComponent> nagiosComponents = Collections.singletonMap("NAGIOS_SERVER", nagiosServer);
-    expect(nagiosService.getServiceComponents()).andReturn(nagiosComponents).anyTimes();
+    Map<String, ServiceComponent> hbaseComponents = Collections.singletonMap(
+        "HBASE_MASTER", hbaseMaster);
+    expect(hbaseService.getServiceComponents()).andReturn(hbaseComponents).anyTimes();
 
     Map<String, Service> installedServices = new HashMap<String, Service>();
     installedServices.put("HDFS", hdfsService);
-    installedServices.put("NAGIOS", nagiosService);
+    installedServices.put("HBASE", hbaseService);
     expect(cluster.getServices()).andReturn(installedServices).atLeastOnce();
 
 
@@ -356,14 +356,14 @@ public class RoleCommandOrderTest {
     expect(cluster.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.0.5"));
 
     //replay
-    replay(cluster, hdfsService, nagiosService, nagiosServer, namenode);
+    replay(cluster, hdfsService, hbaseService, hbaseMaster, namenode);
 
     rco.initialize(cluster);
 
-    Set<Service> transitiveServices =
-      rco.getTransitiveServices(cluster.getService("NAGIOS"), RoleCommand.START);
+    Set<Service> transitiveServices = rco.getTransitiveServices(
+        cluster.getService("HBASE"), RoleCommand.START);
 
-    //HDFS should be started before NAGIOS start
+    // HDFS should be started before HBASE start
     Assert.assertNotNull(transitiveServices);
     Assert.assertFalse(transitiveServices.isEmpty());
     Assert.assertEquals(1, transitiveServices.size());
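
The test now exercises HBASE instead of NAGIOS, but the idea under test is
unchanged: getTransitiveServices() follows role-command dependencies from a
service's components back to the services they depend on. A simplified,
non-Ambari model of that resolution (component names and edges are illustrative):

# Map each (component, command) to the (component, command) pairs it depends on.
DEPENDENCIES = {("HBASE_MASTER", "START"): [("NAMENODE", "START")],
                ("NAMENODE", "START"): []}
COMPONENT_SERVICE = {"HBASE_MASTER": "HBASE", "NAMENODE": "HDFS"}

def transitive_services(service, command):
    seen = set()
    stack = [(c, command) for c, s in COMPONENT_SERVICE.items() if s == service]
    while stack:
        node = stack.pop()
        for dep in DEPENDENCIES.get(node, []):
            dep_service = COMPONENT_SERVICE[dep[0]]
            if dep_service != service:
                seen.add(dep_service)
            stack.append(dep)
    return seen

print(transitive_services("HBASE", "START"))  # {'HDFS'}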

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java b/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
index 8d68f9c..8bead43 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
@@ -21,37 +21,35 @@ package org.apache.ambari.server.metadata;
 
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
-
 import junit.framework.Assert;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.stageplanner.RoleGraphNode;
-import org.junit.After;
-import org.junit.Test;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.cluster.ClusterImpl;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
 
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 
-import org.apache.ambari.server.state.cluster.ClusterImpl;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.junit.Before;
-
 public class RoleGraphTest {
 
-  
+
   private Injector injector;
-  
+
   @Before
   public void setup() throws Exception {
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);    
+    injector.getInstance(GuiceJpaInitializer.class);
   }
 
   @After
@@ -63,9 +61,9 @@ public class RoleGraphTest {
   public void testValidateOrder() throws AmbariException {
     RoleCommandOrder rco = injector.getInstance(RoleCommandOrder.class);
     ClusterImpl cluster = mock(ClusterImpl.class);
-    
+
     when(cluster.getCurrentStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
-    
+
     rco.initialize(cluster);
 
     RoleGraphNode datanode_upgrade = new RoleGraphNode(Role.DATANODE, RoleCommand.UPGRADE);
@@ -82,25 +80,10 @@ public class RoleGraphTest {
     RoleGraphNode datanode_start = new RoleGraphNode(Role.DATANODE, RoleCommand.START);
     RoleGraphNode datanode_install = new RoleGraphNode(Role.DATANODE, RoleCommand.INSTALL);
     RoleGraphNode jobtracker_start = new RoleGraphNode(Role.JOBTRACKER, RoleCommand.START);
-    RoleGraphNode tasktracker_start = new RoleGraphNode(Role.TASKTRACKER, RoleCommand.START);
     Assert.assertEquals(1, rco.order(datanode_start, datanode_install));
     Assert.assertEquals(1, rco.order(jobtracker_start, datanode_start));
     Assert.assertEquals(0, rco.order(jobtracker_start, jobtracker_start));
 
-    RoleGraphNode hive_client_install = new RoleGraphNode(Role.HIVE_CLIENT,
-      RoleCommand.INSTALL);
-    RoleGraphNode mapred_client_install = new RoleGraphNode(Role.MAPREDUCE_CLIENT,
-      RoleCommand.INSTALL);
-    RoleGraphNode hcat_client_install = new RoleGraphNode(Role.HCAT,
-      RoleCommand.INSTALL);
-    RoleGraphNode nagios_server_install = new RoleGraphNode(Role.NAGIOS_SERVER,
-      RoleCommand.INSTALL);
-    RoleGraphNode oozie_client_install = new RoleGraphNode(Role.OOZIE_CLIENT,
-      RoleCommand.INSTALL);
-    Assert.assertEquals(1, rco.order(nagios_server_install, hive_client_install));
-    Assert.assertEquals(1, rco.order(nagios_server_install, mapred_client_install));
-    Assert.assertEquals(1, rco.order(nagios_server_install, hcat_client_install));
-    Assert.assertEquals(1, rco.order(nagios_server_install, oozie_client_install));
 
     RoleGraphNode pig_service_check = new RoleGraphNode(Role.PIG_SERVICE_CHECK, RoleCommand.SERVICE_CHECK);
     RoleGraphNode resourcemanager_start = new RoleGraphNode(Role.RESOURCEMANAGER, RoleCommand.START);
@@ -109,78 +92,50 @@ public class RoleGraphTest {
     RoleGraphNode hdfs_service_check = new RoleGraphNode(Role.HDFS_SERVICE_CHECK, RoleCommand.SERVICE_CHECK);
     RoleGraphNode snamenode_start = new RoleGraphNode(Role.SECONDARY_NAMENODE, RoleCommand.START);
     Assert.assertEquals(-1, rco.order(snamenode_start, hdfs_service_check));
-    
+
     RoleGraphNode mapred2_service_check = new RoleGraphNode(Role.MAPREDUCE2_SERVICE_CHECK, RoleCommand.SERVICE_CHECK);
     RoleGraphNode rm_start = new RoleGraphNode(Role.RESOURCEMANAGER, RoleCommand.START);
     RoleGraphNode nm_start = new RoleGraphNode(Role.NODEMANAGER, RoleCommand.START);
     RoleGraphNode hs_start = new RoleGraphNode(Role.HISTORYSERVER, RoleCommand.START);
-    RoleGraphNode nagios_start = new RoleGraphNode(Role.NAGIOS_SERVER, RoleCommand.START);
+
     Assert.assertEquals(-1, rco.order(rm_start, mapred2_service_check));
-    Assert.assertEquals(-1, rco.order(nm_start, mapred2_service_check)); 
+    Assert.assertEquals(-1, rco.order(nm_start, mapred2_service_check));
     Assert.assertEquals(-1, rco.order(hs_start, mapred2_service_check));
     Assert.assertEquals(-1, rco.order(hs_start, mapred2_service_check));
     Assert.assertEquals(1, rco.order(nm_start, rm_start));
-    
+
     //Non-HA mode
     RoleGraphNode nn_start = new RoleGraphNode(Role.NAMENODE, RoleCommand.START);
     RoleGraphNode jn_start = new RoleGraphNode(Role.JOURNALNODE, RoleCommand.START);
     RoleGraphNode zk_server_start = new RoleGraphNode(Role.ZOOKEEPER_SERVER, RoleCommand.START);
     RoleGraphNode hbase_master_start = new RoleGraphNode(Role.HBASE_MASTER, RoleCommand.START);
-    RoleGraphNode hbase_reg_srv_start = new RoleGraphNode(Role.HBASE_REGIONSERVER, RoleCommand.START);
-    RoleGraphNode ganglia_server_start = new RoleGraphNode(Role.GANGLIA_SERVER, RoleCommand.START);
-    RoleGraphNode ganglia_monitor_start = new RoleGraphNode(Role.GANGLIA_MONITOR, RoleCommand.START);
-    RoleGraphNode hcat_start = new RoleGraphNode(Role.HCAT, RoleCommand.START);
     RoleGraphNode hive_srv_start = new RoleGraphNode(Role.HIVE_SERVER, RoleCommand.START);
     RoleGraphNode hive_ms_start = new RoleGraphNode(Role.HIVE_METASTORE, RoleCommand.START);
-    RoleGraphNode hue_start = new RoleGraphNode(Role.HUE_SERVER, RoleCommand.START);
     RoleGraphNode mysql_start = new RoleGraphNode(Role.MYSQL_SERVER, RoleCommand.START);
     RoleGraphNode oozie_srv_start = new RoleGraphNode(Role.OOZIE_SERVER, RoleCommand.START);
-    RoleGraphNode pig_start = new RoleGraphNode(Role.PIG, RoleCommand.START);
-    RoleGraphNode sqoop_start = new RoleGraphNode(Role.SQOOP, RoleCommand.START);
     RoleGraphNode webhcat_srv_start = new RoleGraphNode(Role.WEBHCAT_SERVER, RoleCommand.START);
     RoleGraphNode flume_start = new RoleGraphNode(Role.FLUME_HANDLER, RoleCommand.START);
     RoleGraphNode zkfc_start = new RoleGraphNode(Role.ZKFC, RoleCommand.START);
-    
+
     Assert.assertEquals(0, rco.order(nn_start, jn_start));
     Assert.assertEquals(0, rco.order(nn_start, zk_server_start));
     Assert.assertEquals(0, rco.order(zkfc_start, nn_start));
-    // Check that Nagios starts after other components
-    Assert.assertEquals(1, rco.order(nagios_start, nn_start));
-    Assert.assertEquals(1, rco.order(nagios_start, snamenode_start));
-    Assert.assertEquals(1, rco.order(nagios_start, datanode_start));
-    Assert.assertEquals(1, rco.order(nagios_start, resourcemanager_start));
-    Assert.assertEquals(1, rco.order(nagios_start, nm_start));
-    Assert.assertEquals(1, rco.order(nagios_start, hbase_master_start));
-    Assert.assertEquals(1, rco.order(nagios_start, hbase_reg_srv_start));
-    Assert.assertEquals(1, rco.order(nagios_start, ganglia_server_start));
-    Assert.assertEquals(1, rco.order(nagios_start, ganglia_monitor_start));
-    Assert.assertEquals(1, rco.order(nagios_start, hcat_start));
-    Assert.assertEquals(1, rco.order(nagios_start, hs_start));
-    Assert.assertEquals(1, rco.order(nagios_start, hive_srv_start));
-    Assert.assertEquals(1, rco.order(nagios_start, hue_start));
-    Assert.assertEquals(1, rco.order(nagios_start, jobtracker_start));
-    Assert.assertEquals(1, rco.order(nagios_start, tasktracker_start));
-    Assert.assertEquals(1, rco.order(nagios_start, zk_server_start));
-    Assert.assertEquals(1, rco.order(nagios_start, mysql_start));
-    Assert.assertEquals(1, rco.order(nagios_start, oozie_srv_start));
-    Assert.assertEquals(1, rco.order(nagios_start, pig_start));
-    Assert.assertEquals(1, rco.order(nagios_start, sqoop_start));
-    Assert.assertEquals(1, rco.order(nagios_start, webhcat_srv_start));
-    Assert.assertEquals(1, rco.order(nagios_start, flume_start));
-
-
-
-    //Enable HA for cluster
+
+    Assert.assertEquals(1, rco.order(flume_start, oozie_srv_start));
+    Assert.assertEquals(1, rco.order(hbase_master_start, zk_server_start));
+    Assert.assertEquals(1, rco.order(hive_srv_start, mysql_start));
+    Assert.assertEquals(1, rco.order(hive_ms_start, mysql_start));
+    Assert.assertEquals(1, rco.order(webhcat_srv_start, datanode_start));
+
+    // Enable HA for cluster
     Service hdfsServiceMock = mock(Service.class);
     ServiceComponent jnComponentMock = mock(ServiceComponent.class);
     when(cluster.getService("HDFS")).thenReturn(hdfsServiceMock);
     when(hdfsServiceMock.getServiceComponent("JOURNALNODE")).thenReturn(jnComponentMock);
-    
+
     rco.initialize(cluster);
     Assert.assertEquals(1, rco.order(nn_start, jn_start));
     Assert.assertEquals(1, rco.order(nn_start, zk_server_start));
     Assert.assertEquals(1, rco.order(zkfc_start, nn_start));
-    Assert.assertEquals(1, rco.order(nagios_start, zkfc_start));
-    Assert.assertEquals(1, rco.order(nagios_start, jn_start));
   }
 }
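
For context, rco.order(a, b) behaves like a comparator over role-command
nodes: -1 when a must run before b, 0 when the pair is unordered, and 1 when
a must run after b; the assertions above read exactly in those terms. A
minimal sketch (illustrative, not Ambari code) that leans on this to arrange
a few of the START nodes from the test:

    // Requires: import java.util.*;
    final RoleCommandOrder orderRef = rco;  // capture for the anonymous class
    List<RoleGraphNode> starts = new ArrayList<RoleGraphNode>(Arrays.asList(
        hbase_master_start, zk_server_start, hive_srv_start, mysql_start));
    Collections.sort(starts, new Comparator<RoleGraphNode>() {
      @Override
      public int compare(RoleGraphNode a, RoleGraphNode b) {
        return orderRef.order(a, b);
      }
    });
    // Afterward zk_server_start precedes hbase_master_start, and mysql_start
    // precedes hive_srv_start. Since order() encodes only a partial order, a
    // topological sort is the right tool outside a small illustration.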

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
index a6cbc6a..0502e1a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
@@ -18,6 +18,24 @@
 
 package org.apache.ambari.server.stack;
 
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.metadata.ActionMetadata;
@@ -33,24 +51,6 @@ import org.apache.ambari.server.state.stack.OsFamily;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.io.File;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 /**
  * StackManager unit tests.
  */
@@ -197,7 +197,6 @@ public class StackManagerTest {
     expectedServices.add("HDFS");
     expectedServices.add("HIVE");
     expectedServices.add("MAPREDUCE2");
-    expectedServices.add("NAGIOS");
     expectedServices.add("OOZIE");
     expectedServices.add("PIG");
     expectedServices.add("SQOOP");
@@ -205,6 +204,7 @@ public class StackManagerTest {
     expectedServices.add("ZOOKEEPER");
     expectedServices.add("STORM");
     expectedServices.add("FLUME");
+    expectedServices.add("FAKENAGIOS");
 
     ServiceInfo pigService = null;
     for (ServiceInfo service : services) {
@@ -391,16 +391,22 @@ public class StackManagerTest {
 
   @Test
   public void testMonitoringServicePropertyInheritance() throws Exception{
-    StackInfo stack = stackManager.getStack("HDP", "2.0.7");
+    StackInfo stack = stackManager.getStack("HDP", "2.0.8");
     Collection<ServiceInfo> allServices = stack.getServices();
     assertEquals(13, allServices.size());
+
+    boolean monitoringServiceFound = false;
+
     for (ServiceInfo serviceInfo : allServices) {
-      if (serviceInfo.getName().equals("NAGIOS")) {
+      if (serviceInfo.getName().equals("FAKENAGIOS")) {
+        monitoringServiceFound = true;
         assertTrue(serviceInfo.isMonitoringService());
       } else {
         assertNull(serviceInfo.isMonitoringService());
       }
     }
+
+    assertTrue(monitoringServiceFound);
   }
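
The assertNull branch above implies ServiceInfo#isMonitoringService() returns
a boxed Boolean, keeping "never declared" (null) distinguishable from an
explicit false. Outside a test, a null-safe read avoids an unboxing
NullPointerException; a one-line sketch (hypothetical helper, not part of the
patch):

    // Hypothetical null-safe accessor for the tri-state monitoring flag.
    static boolean isMonitoring(ServiceInfo serviceInfo) {
      return Boolean.TRUE.equals(serviceInfo.isMonitoringService());
    }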
 
   @Test
@@ -408,7 +414,7 @@ public class StackManagerTest {
     StackInfo stack = stackManager.getStack("HDP", "2.0.6");
     Collection<ServiceInfo> allServices = stack.getServices();
 
-    assertEquals(12, allServices.size());
+    assertEquals(11, allServices.size());
     HashSet<String> expectedServices = new HashSet<String>();
     expectedServices.add("GANGLIA");
     expectedServices.add("HBASE");
@@ -416,7 +422,6 @@ public class StackManagerTest {
     expectedServices.add("HDFS");
     expectedServices.add("HIVE");
     expectedServices.add("MAPREDUCE2");
-    expectedServices.add("NAGIOS");
     expectedServices.add("OOZIE");
     expectedServices.add("PIG");
     expectedServices.add("ZOOKEEPER");

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java b/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
index f008980..dd2a519 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
@@ -62,7 +62,7 @@ public class TestStagePlanner {
     RoleCommandOrder rco = injector.getInstance(RoleCommandOrder.class);
     ClusterImpl cluster = mock(ClusterImpl.class);
     when(cluster.getCurrentStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
-    
+
     rco.initialize(cluster);
 
     RoleGraph rg = new RoleGraph(rco);
@@ -140,9 +140,6 @@ public class TestStagePlanner {
     stage.addHostRoleExecutionCommand("host7", Role.WEBHCAT_SERVER,
         RoleCommand.START, new ServiceComponentHostStartEvent("WEBHCAT_SERVER",
             "host7", now), "cluster1", "WEBHCAT");
-    stage.addHostRoleExecutionCommand("host8", Role.NAGIOS_SERVER,
-        RoleCommand.START, new ServiceComponentHostStartEvent("NAGIOS_SERVER",
-            "host8", now), "cluster1", "NAGIOS");
     stage.addHostRoleExecutionCommand("host4", Role.GANGLIA_MONITOR,
         RoleCommand.START, new ServiceComponentHostStartEvent("GANGLIA_MONITOR",
             "host4", now), "cluster1", "GANGLIA");
@@ -156,6 +153,6 @@ public class TestStagePlanner {
     for (Stage s: outStages) {
       System.out.println(s.toString());
     }
-    assertEquals(5, outStages.size());
+    assertEquals(4, outStages.size());
   }
 }
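
The expected stage count falls from 5 to 4 because NAGIOS_SERVER's START was
ordered after every other START, so it always occupied a trailing stage of
its own; removing it deletes that whole layer. An illustrative
layered-scheduling sketch (not the Ambari implementation; allStartNodes is a
hypothetical list of RoleGraphNode START instances) showing why such a node
costs exactly one extra stage:

    // Requires: import java.util.*;
    // Peel off, layer by layer, the nodes that nothing remaining must precede;
    // rco.order(a, b) == 1 means a has to wait for b.
    List<List<RoleGraphNode>> layers = new ArrayList<List<RoleGraphNode>>();
    List<RoleGraphNode> remaining = new ArrayList<RoleGraphNode>(allStartNodes);
    while (!remaining.isEmpty()) {
      List<RoleGraphNode> ready = new ArrayList<RoleGraphNode>();
      for (RoleGraphNode candidate : remaining) {
        boolean blocked = false;
        for (RoleGraphNode other : remaining) {
          if (candidate != other && rco.order(candidate, other) == 1) {
            blocked = true;  // candidate must wait for 'other'
            break;
          }
        }
        if (!blocked) {
          ready.add(candidate);
        }
      }
      if (ready.isEmpty()) {
        throw new IllegalStateException("dependency cycle");  // defensive guard
      }
      layers.add(ready);
      remaining.removeAll(ready);
    }
    // A node ordered strictly after everything else (as NAGIOS_SERVER start
    // was) can only ever land in the final, otherwise-empty layer.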

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/1.3.2/NAGIOS/test_mm_wrapper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/NAGIOS/test_mm_wrapper.py b/ambari-server/src/test/python/stacks/1.3.2/NAGIOS/test_mm_wrapper.py
deleted file mode 100644
index a75e92a..0000000
--- a/ambari-server/src/test/python/stacks/1.3.2/NAGIOS/test_mm_wrapper.py
+++ /dev/null
@@ -1,549 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import StringIO
-
-import os, sys
-import pprint
-import subprocess
-from unittest import TestCase
-from mock.mock import Mock, MagicMock, patch
-import mm_wrapper
-
-class TestOrWrapper(TestCase):
-
-  dummy_ignore_file = """
-vm-4.vm HIVE HIVE_METASTORE
-vm-5.vm GANGLIA GANGLIA_MONITOR
-vm-4.vm YARN NODEMANAGER
-vm-3.vm YARN NODEMANAGER
-vm-3.vm HBASE HBASE_REGIONSERVER
-vm-4.vm HBASE HBASE_REGIONSERVER
-vm-4.vm STORM STORM_REST_API
-vm-4.vm HDFS DATANODE
-vm-4.vm STORM SUPERVISOR
-vm-4.vm STORM NIMBUS
-vm-4.vm STORM STORM_UI_SERVER
-vm-3.vm STORM SUPERVISOR
-vm-4.vm HDFS SECONDARY_NAMENODE
-vm-3.vm FLUME FLUME_HANDLER
-vm-4.vm GANGLIA GANGLIA_SERVER
-vm-4.vm HIVE HIVE_SERVER
-vm-4.vm ZOOKEEPER ZOOKEEPER_SERVER
-vm-4.vm WEBHCAT WEBHCAT_SERVER
-vm-3.vm HBASE HBASE_MASTER
-vm-4.vm GANGLIA GANGLIA_MONITOR
-vm-3.vm GANGLIA GANGLIA_MONITOR
-vm-3.vm HDFS NAMENODE
-vm-4.vm HIVE MYSQL_SERVER
-vm-4.vm YARN APP_TIMELINE_SERVER
-vm-4.vm FALCON FALCON_SERVER
-vm-3.vm HDFS DATANODE
-vm-4.vm YARN RESOURCEMANAGER
-vm-4.vm OOZIE OOZIE_SERVER
-vm-4.vm MAPREDUCE2 HISTORYSERVER
-vm-4.vm STORM DRPC_SERVER
-vm-4.vm FLUME FLUME_HANDLER
-vm-3.vm ZOOKEEPER ZOOKEEPER_SERVER
-"""
-
-  default_empty_check_result = {
-    'message': 'No checks have been run (no hostnames provided)',
-    'retcode': -1,
-    'real_retcode': None
-  }
-
-
-  @patch("__builtin__.open")
-  def test_ignored_host_list(self, open_mock):
-    # Check with empty file content
-    open_mock.return_value.__enter__.return_value.read.return_value = ""
-    lst = mm_wrapper.ignored_host_list('STORM', 'SUPERVISOR')
-    self.assertEqual(pprint.pformat(lst), '[]')
-    # Check with dummy content
-    open_mock.return_value.__enter__.return_value.read.return_value = self.dummy_ignore_file
-    lst = mm_wrapper.ignored_host_list('STORM', 'SUPERVISOR')
-    self.assertEqual(pprint.pformat(lst), "['vm-4.vm', 'vm-3.vm']")
-    # Check if service name/comp name are not defined
-    open_mock.return_value.__enter__.return_value.read.return_value = self.dummy_ignore_file
-    lst = mm_wrapper.ignored_host_list('', '')
-    self.assertEqual(pprint.pformat(lst), "[]")
-
-
-  @patch("sys.exit")
-  def test_print_usage(self, exit_mock):
-    mm_wrapper.print_usage()
-    self.assertTrue(exit_mock.called)
-    self.assertEqual(exit_mock.call_args_list[0][0][0], 1)
-
-
-  def test_get_real_component(self):
-    with patch.dict(os.environ, {'NAGIOS__SERVICEHOST_COMPONENT': 'SUPERVISOR'}, clear=True):
-      component = mm_wrapper.get_real_component()
-      self.assertEqual(component, 'SUPERVISOR')
-    with patch.dict(os.environ, {'NAGIOS__SERVICEHOST_COMPONENT': 'JOBHISTORY'}, clear=True):
-      component = mm_wrapper.get_real_component()
-      self.assertEqual(component, 'MAPREDUCE2')
-
-
-  @patch("mm_wrapper.print_usage")
-  def test_parse_args(self, print_usage_mock):
-    args = ['or', 'h1', 'h2', '--', 'prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    mode, hostnames, command_line = mm_wrapper.parse_args(args)
-    self.assertEquals(mode, mm_wrapper.OR)
-    self.assertEquals(hostnames, ['h1', 'h2'])
-    self.assertEquals(command_line, ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt'])
-
-    args = ['and', 'h1', 'h2', '--', 'prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    mode, hostnames, command_line = mm_wrapper.parse_args(args)
-    self.assertEquals(mode, mm_wrapper.AND)
-    self.assertEquals(hostnames, ['h1', 'h2'])
-    self.assertEquals(command_line, ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt'])
-
-    args = ['env_only', 'h1', 'h2', '--', 'prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    mode, hostnames, command_line = mm_wrapper.parse_args(args)
-    self.assertEquals(mode, mm_wrapper.ENV_ONLY)
-    self.assertEquals(hostnames, ['h1', 'h2'])
-    self.assertEquals(command_line, ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt'])
-
-    # Check wrong usage
-    args = []
-    mm_wrapper.parse_args(args)
-    self.assertTrue(print_usage_mock.called)
-
-
-  @patch("mm_wrapper.ignored_host_list")
-  @patch("mm_wrapper.work_in_or_mode")
-  @patch("mm_wrapper.work_in_and_mode")
-  @patch("mm_wrapper.work_in_env_only_mode")
-  @patch("mm_wrapper.work_in_filter_mm_mode")
-  @patch("mm_wrapper.work_in_legacy_check_wrapper_mode")
-  def test_do_work(self, work_in_legacy_check_wrapper_mode, work_in_filter_mm_mode_mock,
-                   work_in_env_only_mode_mock, work_in_and_mode_mock,
-                   work_in_or_mode_mock,
-                   ignored_host_list_mock):
-    hostnames = ['h1', 'h2', 'h3', 'h4']
-    ignored_host_list_mock.return_value = ['h2', 'h3']
-    command_line = ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    work_in_or_mode_mock.return_value = {
-      'message': "or_mode mode result",
-      'retcode': 0,
-      'real_retcode': None
-    }
-    work_in_and_mode_mock.return_value = {
-      'message': "and_mode mode result",
-      'retcode': 0,
-      'real_retcode': None
-    }
-    work_in_env_only_mode_mock.return_value = {
-      'message': "env_only mode result",
-      'retcode': 0,
-      'real_retcode': None
-    }
-    work_in_filter_mm_mode_mock.return_value = {
-      'message': "filter_mm mode result",
-      'retcode': 0,
-      'real_retcode': None
-    }
-    work_in_legacy_check_wrapper_mode.return_value = {
-      'message': "legacy_check_wrapper mode result",
-      'retcode': 0,
-      'real_retcode': None
-    }
-    result = mm_wrapper.do_work(mm_wrapper.OR, hostnames, command_line)
-    self.assertEquals(str(result), "(['or_mode mode result'], 0)")
-
-    result = mm_wrapper.do_work(mm_wrapper.AND, hostnames, command_line)
-    self.assertEquals(str(result), "(['and_mode mode result'], 0)")
-
-    result = mm_wrapper.do_work(mm_wrapper.ENV_ONLY, hostnames, command_line)
-    self.assertEquals(str(result), "(['env_only mode result'], 0)")
-
-    result = mm_wrapper.do_work(mm_wrapper.FILTER_MM, hostnames, command_line)
-    self.assertEquals(str(result), "(['filter_mm mode result'], 0)")
-
-    result = mm_wrapper.do_work(mm_wrapper.LEGACY_CHECK_WRAPPER, hostnames, command_line)
-    self.assertEquals(str(result), "(['legacy_check_wrapper mode result'], 0)")
-
-    # Check behaviour when real_retcode is defined
-    work_in_or_mode_mock.return_value = {
-      'message': "or_mode mode result",
-      'retcode': 0,
-      'real_retcode': 1
-    }
-    result = mm_wrapper.do_work(mm_wrapper.OR, hostnames, command_line)
-    self.assertEquals(str(result), "(['or_mode mode result', 'AMBARIPASSIVE=1'], 0)")
-
-
-  @patch("mm_wrapper.check_output")
-  def test_work_in_or_mode(self, check_output_mock):
-    hostnames = ['h1', 'h2', 'h3', 'h4']
-    ignored_hosts = ['h2', 'h3']
-    command_line = ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    custom_env = {'MM_HOSTS': ignored_hosts}
-
-    # Normal usage
-    check_output_mock.return_value = 'Dummy message'
-    result = mm_wrapper.work_in_or_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(str(result),
-                      "{'message': 'Dummy message', 'real_retcode': None, 'retcode': 0}")
-    self.assertEquals(check_output_mock.call_count, 1)  # Exited on first success
-    self.assertEquals(check_output_mock.call_args[1]['env']['MM_HOSTS'], ignored_hosts)
-    for check_tupple in zip(check_output_mock.call_args_list, hostnames):
-      self.assertEquals(check_tupple[0][0][0], ['prog', '-h', check_tupple[1], '-opt', 'yet', 'another', 'opt'])
-
-    check_output_mock.reset_mock()
-
-    # Failed all checks
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(3, 'dummy cmd')
-    error.output = 'dummy output2'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output3'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(2, 'dummy cmd')
-    error.output = 'dummy output4'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-
-    result = mm_wrapper.work_in_or_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 4)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output4', 'real_retcode': None, 'retcode': 2}")
-
-    check_output_mock.reset_mock()
-
-    # Failed all but MM host component checks
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(0, 'dummy cmd')
-    error.output = 'dummy output2'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(2, 'dummy cmd')
-    error.output = 'dummy output3'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(3, 'dummy cmd')
-    error.output = 'dummy output4'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-
-    result = mm_wrapper.work_in_or_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 4)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output4', 'real_retcode': None, 'retcode': 3}")
-
-    check_output_mock.reset_mock()
-
-    # Components check only for one check is successful
-    ignored_hosts = []
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(0, 'dummy cmd')
-    error.output = 'dummy output2'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(2, 'dummy cmd')
-    error.output = 'dummy output3'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(3, 'dummy cmd')
-    error.output = 'dummy output4'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-
-    result = mm_wrapper.work_in_or_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 2)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output2', 'real_retcode': None, 'retcode': 0}")
-
-
-  @patch("mm_wrapper.check_output")
-  def test_work_in_and_mode(self, check_output_mock):
-    hostnames = ['h1', 'h2', 'h3', 'h4']
-    ignored_hosts = ['h2', 'h3']
-    command_line = ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    custom_env = {'MM_HOSTS': ignored_hosts}
-
-    # Normal usage
-    check_output_mock.return_value = 'Dummy message'
-    result = mm_wrapper.work_in_and_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(str(result),
-                      "{'message': 'Dummy message', 'real_retcode': None, 'retcode': 0}")
-    self.assertEquals(check_output_mock.call_count, 4)
-    self.assertEquals(check_output_mock.call_args[1]['env']['MM_HOSTS'], ignored_hosts)
-    for check_tupple in zip(check_output_mock.call_args_list, hostnames):
-      self.assertEquals(check_tupple[0][0][0], ['prog', '-h', check_tupple[1], '-opt', 'yet', 'another', 'opt'])
-
-    check_output_mock.reset_mock()
-
-    # Failed all checks
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(3, 'dummy cmd')
-    error.output = 'dummy output2'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output3'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(2, 'dummy cmd')
-    error.output = 'dummy output4'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_and_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 4)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output4', 'real_retcode': None, 'retcode': 2}")
-
-    check_output_mock.reset_mock()
-
-    # Failed all but MM host component checks
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(0, 'dummy cmd')
-    error.output = 'dummy output2'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(2, 'dummy cmd')
-    error.output = 'dummy output3'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(3, 'dummy cmd')
-    error.output = 'dummy output4'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_and_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 4)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output4', 'real_retcode': None, 'retcode': 3}")
-
-    check_output_mock.reset_mock()
-
-    # Components check only for one check is successful
-    ignored_hosts = []
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(0, 'dummy cmd')
-    error.output = 'dummy output2'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(2, 'dummy cmd')
-    error.output = 'dummy output3'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(3, 'dummy cmd')
-    error.output = 'dummy output4'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_and_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 4)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output4', 'real_retcode': None, 'retcode': 3}")
-
-
-  @patch("mm_wrapper.check_output")
-  def test_work_in_env_only_mode(self, check_output_mock):
-    hostnames = ['h1', 'h2', 'h3', 'h4']
-    ignored_hosts = ['h2', 'h3']
-    command_line = ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    custom_env = {'MM_HOSTS' : ignored_hosts}
-
-    # Normal usage
-    check_output_mock.return_value = 'Dummy message'
-    result = mm_wrapper.work_in_env_only_mode(hostnames, command_line, custom_env)
-    self.assertEquals(str(result),
-                      "{'message': 'Dummy message', 'real_retcode': None, 'retcode': 0}")
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(check_output_mock.call_args[1]['env']['MM_HOSTS'], ignored_hosts)
-    self.assertEquals(check_output_mock.call_args[0][0],
-                      ['prog', '-h', 'h1', 'h2', 'h3', 'h4', '-opt', 'yet', 'another', 'opt'])
-
-    check_output_mock.reset_mock()
-
-    # Failed all checks
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_env_only_mode(hostnames, command_line, custom_env)
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output1', 'real_retcode': None, 'retcode': 1}")
-
-    check_output_mock.reset_mock()
-
-
-  @patch("mm_wrapper.check_output")
-  def test_work_in_filter_mm_mode(self, check_output_mock):
-    hostnames = ['h1', 'h2', 'h3', 'h4']
-    ignored_hosts = ['h2', 'h3']
-    command_line = ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    custom_env = {'MM_HOSTS' : ignored_hosts}
-
-    # Normal usage
-    check_output_mock.return_value = 'Dummy message'
-    result = mm_wrapper.work_in_filter_mm_mode(hostnames, ignored_hosts, command_line,
-                                               custom_env,
-                                               self.default_empty_check_result)
-    self.assertEquals(str(result),
-                      "{'message': 'Dummy message', 'real_retcode': None, 'retcode': 0}")
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(check_output_mock.call_args[1]['env']['MM_HOSTS'], ignored_hosts)
-    self.assertEquals(check_output_mock.call_args[0][0],
-                      ['prog', '-h', 'h1', 'h4', '-opt', 'yet', 'another', 'opt'])
-
-    check_output_mock.reset_mock()
-
-    # Failed all checks
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_filter_mm_mode(hostnames, ignored_hosts, command_line,
-                                               custom_env,
-                                               self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output1', 'real_retcode': None, 'retcode': 1}")
-
-    check_output_mock.reset_mock()
-
-    # All host components are in MM
-    ignored_hosts = hostnames
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_filter_mm_mode(hostnames, ignored_hosts, command_line,
-                                               custom_env,
-                                               self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 0)
-    self.assertEquals(str(result),
-                      "{'message': 'No checks have been run (no hostnames provided)', "
-                      "'real_retcode': None, 'retcode': -1}")
-
-    check_output_mock.reset_mock()
-
-
-  @patch("mm_wrapper.check_output")
-  @patch.dict(os.environ, {'NAGIOS_HOSTNAME': 'h2'}, clear=True)
-  def test_work_in_legacy_check_wrapper_mode(self, check_output_mock):
-    command_line = ['prog', '-opt', 'yet', 'another', 'opt']
-    ignored_hosts = []
-    custom_env = {'MM_HOSTS': ignored_hosts}
-
-    # Normal usage
-    ignored_hosts = []
-    check_output_mock.return_value = 'Dummy message'
-    result = mm_wrapper.work_in_legacy_check_wrapper_mode(ignored_hosts, command_line,
-                                               custom_env)
-    self.assertEquals(str(result),
-                      "{'message': 'Dummy message', 'real_retcode': None, 'retcode': 0}")
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(check_output_mock.call_args[1]['env']['MM_HOSTS'], ignored_hosts)
-    self.assertEquals(check_output_mock.call_args[0][0],
-                      ['prog', '-opt', 'yet', 'another', 'opt'])
-
-    check_output_mock.reset_mock()
-
-    # Failed check on host that is not in MM state
-    ignored_hosts = ['h3']
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_legacy_check_wrapper_mode(ignored_hosts, command_line,
-                                               custom_env)
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output1', 'real_retcode': None, 'retcode': 1}")
-
-    check_output_mock.reset_mock()
-
-    # Failed check on host that is in MM state
-    ignored_hosts = ['h2']
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_legacy_check_wrapper_mode(ignored_hosts, command_line,
-                                               custom_env)
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output1', 'real_retcode': 1, 'retcode': 0}")
-
-    check_output_mock.reset_mock()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/1.3.2/NAGIOS/test_nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/NAGIOS/test_nagios_server.py b/ambari-server/src/test/python/stacks/1.3.2/NAGIOS/test_nagios_server.py
deleted file mode 100644
index f1e4650..0000000
--- a/ambari-server/src/test/python/stacks/1.3.2/NAGIOS/test_nagios_server.py
+++ /dev/null
@@ -1,282 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from mock.mock import MagicMock, patch
-from stacks.utils.RMFTestCase import *
-
-
-class TestNagiosServer(RMFTestCase):
-  def test_configure_default(self):
-    self.executeScript("1.3.2/services/NAGIOS/package/scripts/nagios_server.py",
-                       classname="NagiosServer",
-                       command="configure",
-                       config_file="default.json"
-    )
-    self.assert_configure_default()
-    self.assertNoMoreResources()
-
-
-  def test_start_default(self):
-    self.executeScript(
-      "1.3.2/services/NAGIOS/package/scripts/nagios_service.py",
-      classname="NagiosServer",
-      command="start",
-      config_file="default.json"
-    )
-    self.assert_configure_default()
-    self.assertResourceCalled('Execute', 'service nagios start',
-                              path=['/usr/local/bin/:/bin/:/sbin/']
-    )
-    self.assertResourceCalled('MonitorWebserver', 'restart',
-    )
-    self.assertNoMoreResources()
-
-
-  @patch('os.path.isfile')
-  def test_stop_default(self, os_path_isfile_mock):
-    src_dir = RMFTestCase._getSrcFolder()    
-    os_path_isfile_mock.side_effect = [False, True]
-       
-    self.executeScript(
-      "1.3.2/services/NAGIOS/package/scripts/nagios_service.py",
-      classname="NagiosServer",
-      command="stop",
-      config_file="default.json"
-    )
-    
-    self.assertResourceCalled('Execute','service nagios stop', path=['/usr/local/bin/:/bin/:/sbin/'])
-    self.assertResourceCalled('Execute','rm -f /var/run/nagios/nagios.pid')
-    self.assertResourceCalled('MonitorWebserver', 'restart')
-    
-    self.assertNoMoreResources()
-
-
-  def assert_configure_default(self):
-    self.assertResourceCalled('File', '/etc/apache2/conf.d/nagios.conf',
-                              owner='nagios',
-                              group='nagios',
-                              content=Template("nagios.conf.j2"),
-                              mode=0644
-    )
-    self.assertResourceCalled('Directory', '/etc/nagios',
-                              owner='nagios',
-                              group='nagios',
-    )
-    self.assertResourceCalled('Directory', '/usr/lib64/nagios/plugins'
-    )
-    self.assertResourceCalled('Directory', '/etc/nagios/objects'
-    )
-    self.assertResourceCalled('Directory', '/var/run/nagios',
-                              owner='nagios',
-                              group='nagios',
-                              mode=0755,
-                              recursive=True
-    )
-    self.assertResourceCalled('Directory', '/var/nagios',
-                              owner='nagios',
-                              group='nagios',
-                              recursive=True
-    )
-    self.assertResourceCalled('Directory', '/var/nagios/spool/checkresults',
-                              owner='nagios',
-                              group='nagios',
-                              recursive=True
-    )
-    self.assertResourceCalled('Directory', '/var/nagios/rw',
-                              owner='nagios',
-                              group='nagios',
-                              recursive=True
-    )
-    self.assertResourceCalled('Directory', '/var/log/nagios',
-                              owner='nagios',
-                              group='nagios',
-                              mode=0755
-    )
-    self.assertResourceCalled('Directory', '/var/log/nagios/archives',
-                              owner='nagios',
-                              group='nagios',
-                              mode=0755
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/nagios/nagios.cfg',
-                              owner='nagios',
-                              group='nagios',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/nagios/resource.cfg',
-                              owner='nagios',
-                              group='nagios',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig',
-                              '/etc/nagios/objects/hadoop-hosts.cfg',
-                              owner='nagios',
-                              group='hadoop',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig',
-                              '/etc/nagios/objects/hadoop-hostgroups.cfg',
-                              owner='nagios',
-                              group='hadoop',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig',
-                              '/etc/nagios/objects/hadoop-servicegroups.cfg',
-                              owner='nagios',
-                              group='hadoop',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig',
-                              '/etc/nagios/objects/hadoop-services.cfg',
-                              owner='nagios',
-                              group='hadoop',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig',
-                              '/etc/nagios/objects/hadoop-commands.cfg',
-                              owner='nagios',
-                              group='hadoop',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig',
-                              '/etc/nagios/objects/contacts.cfg',
-                              owner='nagios',
-                              group='hadoop',
-                              mode=None
-    )
-    self.assertResourceCalled('File', '/usr/lib64/nagios/plugins/check_cpu.pl',
-                              content=StaticFile('check_cpu.pl'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File', '/usr/lib64/nagios/plugins/check_cpu.php',
-                              content=StaticFile('check_cpu.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File', '/usr/lib64/nagios/plugins/check_cpu_ha.php',
-                              content=StaticFile('check_cpu_ha.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_datanode_storage.php',
-                              content=StaticFile('check_datanode_storage.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_aggregate.php',
-                              content=StaticFile('check_aggregate.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_hdfs_blocks.php',
-                              content=StaticFile('check_hdfs_blocks.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_hdfs_capacity.php',
-                              content=StaticFile('check_hdfs_capacity.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_rpcq_latency.php',
-                              content=StaticFile('check_rpcq_latency.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_webui.sh',
-                              content=StaticFile('check_webui.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_name_dir_status.php',
-                              content=StaticFile('check_name_dir_status.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_oozie_status.sh',
-                              content=StaticFile('check_oozie_status.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_templeton_status.sh',
-                              content=StaticFile('check_templeton_status.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_hive_metastore_status.sh',
-                              content=StaticFile(
-                                'check_hive_metastore_status.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_hue_status.sh',
-                              content=StaticFile('check_hue_status.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_mapred_local_dir_used.sh',
-                              content=StaticFile(
-                                'check_mapred_local_dir_used.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_nodemanager_health.sh',
-                              content=StaticFile('check_nodemanager_health.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_namenodes_ha.sh',
-                              content=StaticFile('check_namenodes_ha.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/hdp_nagios_init.php',
-                              content=StaticFile('hdp_nagios_init.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/mm_wrapper.py',
-                              content=StaticFile('mm_wrapper.py'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_hive_thrift_port.py',
-                              content=StaticFile('check_hive_thrift_port.py'),
-                              mode=0755
-    )
-    self.assertResourceCalled('Execute',
-                              'htpasswd2 -c -b  /etc/nagios/htpasswd.users nagiosadmin \'!`"\'"\'"\' 1\''
-    )
-
-    self.assertResourceCalled('File', '/etc/nagios/htpasswd.users',
-                              owner='nagios',
-                              group='nagios',
-                              mode=0640
-    )
-    self.assertResourceCalled('Execute', 'usermod -G nagios wwwrun'
-    )
-
-    self.assertResourceCalled('File', '/etc/nagios/command.cfg',
-                              owner='nagios',
-                              group='nagios'
-    )
-    self.assertResourceCalled('File', '/var/nagios/ignore.dat',
-                              owner='nagios',
-                              group='nagios',
-                              mode=0664
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json b/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json
index 31bc5d4..9ba4073 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json
@@ -326,14 +326,6 @@
             "hadoop_heapsize": "1024", 
             "jtnode_opt_maxnewsize": "200m"
         }, 
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "!`\"' 1",
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua"
-        }, 
         "hive-env": {
             "hive_metastore_user_passwd": "password", 
             "hcat_pid_dir": "/var/run/webhcat", 
@@ -493,9 +485,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "hive_metastore_hosts": [
             "c6402.ambari.apache.org"
         ], 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/default.json b/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
index d9f9f71..3448b83 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
@@ -18,8 +18,8 @@
         "ambari_db_rca_username": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45", 
         "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
-        "group_list": "[\"hadoop\",\"nobody\",\"users\",\"nagios\"]",
-        "user_list": "[\"hive\",\"oozie\",\"nobody\",\"nagios\",\"ambari-qa\",\"flume\",\"hdfs\",\"storm\",\"mapred\",\"hbase\",\"tez\",\"zookeeper\",\"falcon\",\"sqoop\",\"yarn\",\"hcat\"]"
+        "group_list": "[\"hadoop\",\"nobody\",\"users\"]",
+        "user_list": "[\"hive\",\"oozie\",\"nobody\",\"ambari-qa\",\"flume\",\"hdfs\",\"storm\",\"mapred\",\"hbase\",\"tez\",\"zookeeper\",\"falcon\",\"sqoop\",\"yarn\",\"hcat\"]"
     }, 
     "commandType": "EXECUTION_COMMAND", 
     "roleParams": {}, 
@@ -335,14 +335,6 @@
             "jtnode_opt_maxnewsize": "200m",
             "rca_properties": "\nambari.jobhistory.database={ambari_db_rca_url}\nambari.jobhistory.driver={ambari_db_rca_driver}\nambari.jobhistory.user={ambari_db_rca_username}\nambari.jobhistory.password={ambari_db_rca_password}\nambari.jobhistory.logger=${{hadoop.root.logger}}\n\nlog4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender\nlog4j.appender.JHA.database={ambari_db_rca_url}\nlog4j.appender.JHA.driver={ambari_db_rca_driver}\nlog4j.appender.JHA.user={ambari_db_rca_username}\nlog4j.appender.JHA.password={ambari_db_rca_password}\n\nlog4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=DEBUG,JHA\nlog4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true\n\n"
         }, 
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "!`\"' 1",
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua"
-        }, 
         "hive-env": {
             "hive_metastore_user_passwd": "password", 
             "hcat_pid_dir": "/var/run/webhcat", 
@@ -559,9 +551,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "hive_metastore_hosts": [
             "c6402.ambari.apache.org"
         ], 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json b/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json
index 2ec3a22..fa8d183 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json
@@ -326,14 +326,6 @@
             "hadoop_heapsize": "1024", 
             "jtnode_opt_maxnewsize": "200m"
         }, 
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "!`\"' 1",
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua"
-        }, 
         "hive-env": {
             "hive_metastore_user_passwd": "password", 
             "hcat_pid_dir": "/var/run/webhcat", 
@@ -541,9 +533,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "hive_metastore_hosts": [
             "c6402.ambari.apache.org"
         ], 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/1.3.2/configs/default_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/default_client.json b/ambari-server/src/test/python/stacks/1.3.2/configs/default_client.json
index da7a878..77a06f2 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/default_client.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/default_client.json
@@ -326,14 +326,6 @@
             "hadoop_heapsize": "1024", 
             "jtnode_opt_maxnewsize": "200m"
         }, 
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "!`\"' 1",
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua"
-        }, 
         "hive-env": {
             "hive_metastore_user_passwd": "password", 
             "hcat_pid_dir": "/var/run/webhcat", 
@@ -544,9 +536,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "hive_metastore_hosts": [
             "c6402.ambari.apache.org"
         ], 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json b/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
index 3c08702..1278350 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
@@ -170,15 +170,13 @@
             "tasktracker_task_controller": "org.apache.hadoop.mapred.LinuxTaskController", 
             "oozie_keytab": "/etc/security/keytabs/oozie.service.keytab", 
             "hadoop_http_principal_name": "HTTP/_HOST", 
-            "kinit_path_local": "/usr/bin", 
-            "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab", 
+            "kinit_path_local": "/usr/bin",  
             "hbase_regionserver_heapsize": "1024m",
             "hbase_regionserver_xmn_max": "512",
             "hbase_regionserver_xmn_ratio": "0.2",
             "datanode_primary_name": "dn", 
             "namenode_principal_name": "nn/_HOST", 
-            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
-            "nagios_principal_name": "nagios/c6402.ambari.apache.org@EXAMPLE.COM", 
+            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab",  
             "dfs_datanode_http_address": "1022",
             "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab", 
             "jobtracker_primary_name": "jt", 
@@ -188,8 +186,7 @@
             "clientPort": "2181", 
             "oozie_jdbc_driver": "org.apache.derby.jdbc.EmbeddedDriver", 
             "hive_metastore_primary_name": "hive", 
-            "hbase_master_keytab": "/etc/security/keytabs/hbase.service.keytab", 
-            "nagios_primary_name": "nagios", 
+            "hbase_master_keytab": "/etc/security/keytabs/hbase.service.keytab",  
             "jobtracker_principal_name": "jt/_HOST", 
             "hive_database": "New MySQL Database", 
             "hcat_pid_dir": "/var/run/webhcat",
@@ -198,7 +195,6 @@
             "oozie_pid_dir": "/var/run/oozie", 
             "datanode_principal_name": "dn/_HOST", 
             "hive_metastore_keytab": "/etc/security/keytabs/hive.service.keytab", 
-            "nagios_group": "nagios", 
             "hcat_user": "hcat", 
             "hadoop_heapsize": "1024", 
             "hbase_regionserver_primary_name": "hbase", 
@@ -219,7 +215,6 @@
             "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
             "kerberos_domain": "EXAMPLE.COM", 
             "snamenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
-            "nagios_server": "c6402.ambari.apache.org", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
             "lzo_enabled": "true", 
             "oozie_principal_name": "oozie/c6402.ambari.apache.org", 
@@ -241,7 +236,6 @@
             "gmetad_user": "nobody", 
             "oozie_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
             "hive_metastore": "c6402.ambari.apache.org", 
-            "nagios_user": "nagios", 
             "security_enabled": "true", 
             "proxyuser_group": "users", 
             "namenode_formatted_mark_dir": "/var/run/hadoop/hdfs/namenode/formatted/", 
@@ -261,8 +255,6 @@
             "jtnode_heapsize": "1024m", 
             "yarn_user": "yarn", 
             "gmond_user": "nobody", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "q@q.q", 
             "snamenode_primary_name": "nn", 
             "hdfs_user": "hdfs", 
             "oozie_database_type": "derby", 
@@ -290,7 +282,6 @@
             "user_group": "hadoop", 
             "hive_user": "hive", 
             "webHCat_http_primary_name": "HTTP", 
-            "nagios_web_password": "!`\"' 1", 
             "smokeuser": "ambari-qa", 
             "ganglia_conf_dir": "/etc/ganglia/hdp", 
             "hbase_master_heapsize": "1024m", 
@@ -508,16 +499,6 @@
             "hadoop_heapsize": "1024", 
             "jtnode_opt_maxnewsize": "200m"
         }, 
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "password", 
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua",
-            "nagios_principal_name": "nagios/c6402.ambari.apache.org@EXAMPLE.COM",
-            "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab"
-        }, 
         "hive-env": {
             "hive_metastore_user_passwd": "password", 
             "hcat_pid_dir": "/var/run/webhcat", 
@@ -740,9 +721,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "hive_metastore_hosts": [
             "c6402.ambari.apache.org"
         ], 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/1.3.2/configs/secured_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/secured_client.json b/ambari-server/src/test/python/stacks/1.3.2/configs/secured_client.json
index 74ea204..b41af13 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/secured_client.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/secured_client.json
@@ -170,15 +170,13 @@
             "tasktracker_task_controller": "org.apache.hadoop.mapred.LinuxTaskController", 
             "oozie_keytab": "/etc/security/keytabs/oozie.service.keytab", 
             "hadoop_http_principal_name": "HTTP/_HOST", 
-            "kinit_path_local": "/usr/bin", 
-            "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab", 
+            "kinit_path_local": "/usr/bin",  
             "hbase_regionserver_heapsize": "1024m",
             "hbase_regionserver_xmn_max": "512",
             "hbase_regionserver_xmn_ratio": "0.2",
             "datanode_primary_name": "dn", 
             "namenode_principal_name": "nn/_HOST", 
-            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
-            "nagios_principal_name": "nagios/c6402.ambari.apache.org@EXAMPLE.COM", 
+            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab",  
             "dfs_datanode_http_address": "1022", 
             "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab", 
             "jobtracker_primary_name": "jt", 
@@ -188,8 +186,7 @@
             "clientPort": "2181", 
             "oozie_jdbc_driver": "org.apache.derby.jdbc.EmbeddedDriver", 
             "hive_metastore_primary_name": "hive", 
-            "hbase_master_keytab": "/etc/security/keytabs/hbase.service.keytab", 
-            "nagios_primary_name": "nagios", 
+            "hbase_master_keytab": "/etc/security/keytabs/hbase.service.keytab",  
             "jobtracker_principal_name": "jt/_HOST", 
             "hive_database": "New MySQL Database", 
             "hcat_pid_dir": "/var/run/webhcat",
@@ -197,8 +194,7 @@
             "snappy_enabled": "true", 
             "oozie_pid_dir": "/var/run/oozie", 
             "datanode_principal_name": "dn/_HOST", 
-            "hive_metastore_keytab": "/etc/security/keytabs/hive.service.keytab", 
-            "nagios_group": "nagios", 
+            "hive_metastore_keytab": "/etc/security/keytabs/hive.service.keytab",  
             "hcat_user": "hcat", 
             "hadoop_heapsize": "1024", 
             "hbase_regionserver_primary_name": "hbase", 
@@ -218,8 +214,7 @@
             "namenode_heapsize": "1024m", 
             "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
             "kerberos_domain": "EXAMPLE.COM", 
-            "snamenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
-            "nagios_server": "c6402.ambari.apache.org", 
+            "snamenode_keytab": "/etc/security/keytabs/nn.service.keytab",  
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
             "lzo_enabled": "true", 
             "oozie_principal_name": "oozie/c6402.ambari.apache.org", 
@@ -240,8 +235,7 @@
             "hadoop_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
             "gmetad_user": "nobody", 
             "oozie_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "hive_metastore": "c6402.ambari.apache.org", 
-            "nagios_user": "nagios", 
+            "hive_metastore": "c6402.ambari.apache.org",  
             "security_enabled": "true", 
             "proxyuser_group": "users", 
             "namenode_formatted_mark_dir": "/var/run/hadoop/hdfs/namenode/formatted/", 
@@ -261,8 +255,6 @@
             "jtnode_heapsize": "1024m", 
             "yarn_user": "yarn", 
             "gmond_user": "nobody", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "q@q.q", 
             "snamenode_primary_name": "nn", 
             "hdfs_user": "hdfs", 
             "oozie_database_type": "derby", 
@@ -289,8 +281,7 @@
             "hbase_log_dir": "/var/log/hbase", 
             "user_group": "hadoop", 
             "hive_user": "hive", 
-            "webHCat_http_primary_name": "HTTP", 
-            "nagios_web_password": "!`\"' 1", 
+            "webHCat_http_primary_name": "HTTP",  
             "smokeuser": "ambari-qa", 
             "ganglia_conf_dir": "/etc/ganglia/hdp", 
             "hbase_master_heapsize": "1024m", 
@@ -506,16 +497,6 @@
             "hadoop_heapsize": "1024", 
             "jtnode_opt_maxnewsize": "200m"
         }, 
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "password", 
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua",
-            "nagios_principal_name": "nagios/c6402.ambari.apache.org@EXAMPLE.COM",
-            "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab"
-        }, 
         "hive-env": {
             "hive_metastore_user_passwd": "password", 
             "hcat_pid_dir": "/var/run/webhcat", 
@@ -733,9 +714,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "hive_metastore_hosts": [
             "c6402.ambari.apache.org"
         ], 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/1.3.2/configs/secured_no_jce_name.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/secured_no_jce_name.json b/ambari-server/src/test/python/stacks/1.3.2/configs/secured_no_jce_name.json
index 0412d50..124db7c 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/secured_no_jce_name.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/secured_no_jce_name.json
@@ -169,15 +169,13 @@
             "tasktracker_task_controller": "org.apache.hadoop.mapred.LinuxTaskController", 
             "oozie_keytab": "/etc/security/keytabs/oozie.service.keytab", 
             "hadoop_http_principal_name": "HTTP/_HOST", 
-            "kinit_path_local": "/usr/bin", 
-            "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab", 
+            "kinit_path_local": "/usr/bin",  
             "hbase_regionserver_heapsize": "1024m",
             "hbase_regionserver_xmn_max": "512",
             "hbase_regionserver_xmn_ratio": "0.2",
             "datanode_primary_name": "dn", 
             "namenode_principal_name": "nn/_HOST", 
-            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
-            "nagios_principal_name": "nagios/c6402.ambari.apache.org@EXAMPLE.COM", 
+            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab",  
             "dfs_datanode_http_address": "1022", 
             "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab", 
             "jobtracker_primary_name": "jt", 
@@ -188,7 +186,6 @@
             "oozie_jdbc_driver": "org.apache.derby.jdbc.EmbeddedDriver", 
             "hive_metastore_primary_name": "hive", 
             "hbase_master_keytab": "/etc/security/keytabs/hbase.service.keytab", 
-            "nagios_primary_name": "nagios", 
             "jobtracker_principal_name": "jt/_HOST", 
             "hive_database": "New MySQL Database", 
             "hcat_pid_dir": "/etc/run/webhcat", 
@@ -197,7 +194,6 @@
             "oozie_pid_dir": "/var/run/oozie", 
             "datanode_principal_name": "dn/_HOST", 
             "hive_metastore_keytab": "/etc/security/keytabs/hive.service.keytab", 
-            "nagios_group": "nagios", 
             "hcat_user": "hcat", 
             "hadoop_heapsize": "1024", 
             "hbase_regionserver_primary_name": "hbase", 
@@ -217,7 +213,6 @@
             "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
             "kerberos_domain": "EXAMPLE.COM", 
             "snamenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
-            "nagios_server": "c6402.ambari.apache.org", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
             "lzo_enabled": "true", 
             "oozie_principal_name": "oozie/c6402.ambari.apache.org", 
@@ -238,8 +233,7 @@
             "hadoop_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
             "gmetad_user": "nobody", 
             "oozie_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "hive_metastore": "c6402.ambari.apache.org", 
-            "nagios_user": "nagios", 
+            "hive_metastore": "c6402.ambari.apache.org",  
             "security_enabled": "true", 
             "proxyuser_group": "users", 
             "namenode_formatted_mark_dir": "/var/run/hadoop/hdfs/namenode/formatted/", 
@@ -259,8 +253,6 @@
             "jtnode_heapsize": "1024m", 
             "yarn_user": "yarn", 
             "gmond_user": "nobody", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "q@q.q", 
             "snamenode_primary_name": "nn", 
             "hdfs_user": "hdfs", 
             "oozie_database_type": "derby", 
@@ -288,7 +280,6 @@
             "user_group": "hadoop", 
             "hive_user": "hive", 
             "webHCat_http_primary_name": "HTTP", 
-            "nagios_web_password": "!`\"' 1", 
             "smokeuser": "ambari-qa", 
             "ganglia_conf_dir": "/etc/ganglia/hdp", 
             "hbase_master_heapsize": "1024m", 
@@ -591,9 +582,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "hive_metastore_hosts": [
             "c6402.ambari.apache.org"
         ], 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/1.3.2/hooks/before-ANY/test_before_any.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-ANY/test_before_any.py b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-ANY/test_before_any.py
index 836274a..16aa939 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-ANY/test_before_any.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-ANY/test_before_any.py
@@ -45,9 +45,6 @@ class TestHookBeforeInstall(RMFTestCase):
     self.assertResourceCalled('Group', 'users',
         ignore_failures = False,
     )
-    self.assertResourceCalled('Group', 'nagios',
-        ignore_failures = False,
-    )
     self.assertResourceCalled('User', 'hive',
         gid = 'hadoop',
         ignore_failures = False,
@@ -63,11 +60,6 @@ class TestHookBeforeInstall(RMFTestCase):
         ignore_failures = False,
         groups = [u'nobody'],
     )
-    self.assertResourceCalled('User', 'nagios',
-        gid = 'nagios',
-        ignore_failures = False,
-        groups = [u'hadoop'],
-    )
     self.assertResourceCalled('User', 'ambari-qa',
         gid = 'hadoop',
         ignore_failures = False,


[17/17] ambari git commit: AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)

Posted by jo...@apache.org.
AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e4ededeb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e4ededeb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e4ededeb

Branch: refs/heads/trunk
Commit: e4ededeb69dde413b35a6400197b23a889b3963d
Parents: 0bf6672
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Nov 11 11:03:37 2014 -0500
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Nov 11 15:42:45 2014 -0500

----------------------------------------------------------------------
 .../system_action_definitions.xml               |   10 -
 .../custom_actions/nagios_update_ignore.py      |  101 --
 .../custom_actions/validate_configs.py          |    7 -
 .../src/main/resources/properties.json          |    1 -
 .../src/main/resources/role_command_order.json  |   14 +-
 .../0.8/blueprints/multinode-default.json       |    8 -
 .../0.8/blueprints/singlenode-default.json      |    8 -
 .../0.8/hooks/before-INSTALL/scripts/params.py  |    6 -
 .../0.8/hooks/before-START/scripts/params.py    |    2 -
 .../stacks/BIGTOP/0.8/role_command_order.json   |   13 +-
 .../services/GANGLIA/package/files/gmondLib.sh  |    1 -
 .../0.8/services/HDFS/package/scripts/params.py |    4 -
 .../NAGIOS/configuration/nagios-env.xml         |   53 -
 .../BIGTOP/0.8/services/NAGIOS/metainfo.xml     |  160 --
 .../NAGIOS/package/files/check_aggregate.php    |  248 ----
 .../NAGIOS/package/files/check_ambari_alerts.py |   80 -
 .../package/files/check_checkpoint_time.py      |  112 --
 .../services/NAGIOS/package/files/check_cpu.php |  109 --
 .../services/NAGIOS/package/files/check_cpu.pl  |  114 --
 .../NAGIOS/package/files/check_cpu_ha.php       |  116 --
 .../package/files/check_datanode_storage.php    |  100 --
 .../NAGIOS/package/files/check_hdfs_blocks.php  |  102 --
 .../package/files/check_hdfs_capacity.php       |  109 --
 .../files/check_hive_metastore_status.sh        |   45 -
 .../NAGIOS/package/files/check_hue_status.sh    |   31 -
 .../files/check_mapred_local_dir_used.sh        |   34 -
 .../package/files/check_name_dir_status.php     |   93 --
 .../NAGIOS/package/files/check_namenodes_ha.sh  |   83 --
 .../package/files/check_nodemanager_health.sh   |   45 -
 .../NAGIOS/package/files/check_oozie_status.sh  |   45 -
 .../NAGIOS/package/files/check_rpcq_latency.php |  104 --
 .../package/files/check_rpcq_latency_ha.php     |  115 --
 .../package/files/check_templeton_status.sh     |   46 -
 .../NAGIOS/package/files/check_webui.sh         |  103 --
 .../NAGIOS/package/files/check_webui_ha.sh      |   64 -
 .../NAGIOS/package/files/check_wrapper.sh       |   94 --
 .../package/files/hdp_mon_nagios_addons.conf    |   24 -
 .../NAGIOS/package/files/hdp_nagios_init.php    |   81 --
 .../NAGIOS/package/files/nagios_alerts.php      |  513 -------
 .../services/NAGIOS/package/files/sys_logger.py |  186 ---
 .../NAGIOS/package/scripts/functions.py         |   47 -
 .../services/NAGIOS/package/scripts/nagios.py   |  109 --
 .../NAGIOS/package/scripts/nagios_server.py     |  111 --
 .../package/scripts/nagios_server_config.py     |   98 --
 .../NAGIOS/package/scripts/nagios_service.py    |  103 --
 .../services/NAGIOS/package/scripts/params.py   |  287 ----
 .../NAGIOS/package/scripts/status_params.py     |   29 -
 .../NAGIOS/package/templates/contacts.cfg.j2    |  109 --
 .../package/templates/hadoop-commands.cfg.j2    |  166 ---
 .../package/templates/hadoop-hostgroups.cfg.j2  |   33 -
 .../package/templates/hadoop-hosts.cfg.j2       |   53 -
 .../templates/hadoop-servicegroups.cfg.j2       |  119 --
 .../package/templates/hadoop-services.cfg.j2    |  804 ----------
 .../NAGIOS/package/templates/nagios.cfg.j2      | 1365 -----------------
 .../NAGIOS/package/templates/nagios.conf.j2     |   84 --
 .../services/NAGIOS/package/templates/nagios.j2 |  164 ---
 .../NAGIOS/package/templates/resource.cfg.j2    |   51 -
 .../stacks/BIGTOP/0.8/services/stack_advisor.py |    4 +-
 .../1.3.2/hooks/before-ANY/scripts/params.py    |    6 -
 .../hooks/before-INSTALL/scripts/params.py      |    6 -
 .../1.3.2/hooks/before-START/scripts/params.py  |    2 -
 .../stacks/HDP/1.3.2/role_command_order.json    |   15 +-
 .../services/GANGLIA/package/files/gmondLib.sh  |    1 -
 .../services/HDFS/package/scripts/params.py     |    4 -
 .../NAGIOS/configuration/nagios-env.xml         |   53 -
 .../HDP/1.3.2/services/NAGIOS/metainfo.xml      |  125 --
 .../NAGIOS/package/files/check_aggregate.php    |  247 ----
 .../services/NAGIOS/package/files/check_cpu.php |  109 --
 .../services/NAGIOS/package/files/check_cpu.pl  |  114 --
 .../NAGIOS/package/files/check_cpu_ha.php       |  116 --
 .../package/files/check_datanode_storage.php    |  100 --
 .../NAGIOS/package/files/check_hdfs_blocks.php  |  102 --
 .../package/files/check_hdfs_capacity.php       |  109 --
 .../files/check_hive_metastore_status.sh        |   45 -
 .../package/files/check_hive_thrift_port.py     |   72 -
 .../NAGIOS/package/files/check_hue_status.sh    |   31 -
 .../files/check_mapred_local_dir_used.sh        |   34 -
 .../package/files/check_name_dir_status.php     |   93 --
 .../NAGIOS/package/files/check_namenodes_ha.sh  |   83 --
 .../package/files/check_nodemanager_health.sh   |   45 -
 .../NAGIOS/package/files/check_oozie_status.sh  |   45 -
 .../NAGIOS/package/files/check_rpcq_latency.php |  104 --
 .../package/files/check_templeton_status.sh     |   46 -
 .../NAGIOS/package/files/check_webui.sh         |   89 --
 .../NAGIOS/package/files/hdp_nagios_init.php    |   81 --
 .../services/NAGIOS/package/files/mm_wrapper.py |  334 -----
 .../NAGIOS/package/scripts/functions.py         |   47 -
 .../services/NAGIOS/package/scripts/nagios.py   |   94 --
 .../NAGIOS/package/scripts/nagios_server.py     |  103 --
 .../package/scripts/nagios_server_config.py     |   95 --
 .../NAGIOS/package/scripts/nagios_service.py    |   69 -
 .../services/NAGIOS/package/scripts/params.py   |  161 ---
 .../NAGIOS/package/scripts/status_params.py     |   26 -
 .../NAGIOS/package/templates/contacts.cfg.j2    |  109 --
 .../package/templates/hadoop-commands.cfg.j2    |  147 --
 .../package/templates/hadoop-hostgroups.cfg.j2  |   52 -
 .../package/templates/hadoop-hosts.cfg.j2       |   54 -
 .../templates/hadoop-servicegroups.cfg.j2       |  105 --
 .../package/templates/hadoop-services.cfg.j2    |  613 --------
 .../NAGIOS/package/templates/nagios.cfg.j2      | 1368 ------------------
 .../NAGIOS/package/templates/nagios.conf.j2     |   81 --
 .../services/NAGIOS/package/templates/nagios.j2 |  165 ---
 .../NAGIOS/package/templates/resource.cfg.j2    |   70 -
 .../stacks/HDP/1.3.2/services/stack_advisor.py  |    2 +-
 .../stacks/HDP/1.3.3/role_command_order.json    |   15 +-
 .../stacks/HDP/1.3/role_command_order.json      |   15 +-
 .../HDP/2.0.6.GlusterFS/role_command_order.json |   14 +-
 .../2.0.6/hooks/before-ANY/scripts/params.py    |    6 -
 .../hooks/before-INSTALL/scripts/params.py      |    6 -
 .../2.0.6/hooks/before-START/scripts/params.py  |    2 -
 .../stacks/HDP/2.0.6/role_command_order.json    |   14 +-
 .../services/GANGLIA/package/files/gmondLib.sh  |    1 -
 .../services/HDFS/package/scripts/params.py     |    4 -
 .../NAGIOS/configuration/nagios-env.xml         |   53 -
 .../HDP/2.0.6/services/NAGIOS/metainfo.xml      |  163 ---
 .../NAGIOS/package/files/check_aggregate.php    |  248 ----
 .../NAGIOS/package/files/check_ambari_alerts.py |   80 -
 .../package/files/check_checkpoint_time.py      |  123 --
 .../services/NAGIOS/package/files/check_cpu.php |  109 --
 .../services/NAGIOS/package/files/check_cpu.pl  |  114 --
 .../NAGIOS/package/files/check_cpu_ha.php       |  116 --
 .../package/files/check_datanode_storage.php    |  100 --
 .../NAGIOS/package/files/check_hdfs_blocks.php  |  102 --
 .../package/files/check_hdfs_capacity.php       |  109 --
 .../files/check_hive_metastore_status.sh        |   45 -
 .../package/files/check_hive_thrift_port.py     |   72 -
 .../NAGIOS/package/files/check_hue_status.sh    |   31 -
 .../files/check_mapred_local_dir_used.sh        |   34 -
 .../package/files/check_name_dir_status.php     |   93 --
 .../NAGIOS/package/files/check_namenodes_ha.sh  |   83 --
 .../package/files/check_nodemanager_health.sh   |   45 -
 .../NAGIOS/package/files/check_oozie_status.sh  |   45 -
 .../NAGIOS/package/files/check_rpcq_latency.php |  104 --
 .../package/files/check_rpcq_latency_ha.php     |  115 --
 .../package/files/check_templeton_status.sh     |   46 -
 .../NAGIOS/package/files/check_webui.sh         |  103 --
 .../NAGIOS/package/files/check_webui_ha.sh      |   64 -
 .../package/files/hdp_mon_nagios_addons.conf    |   24 -
 .../NAGIOS/package/files/hdp_nagios_init.php    |   81 --
 .../services/NAGIOS/package/files/mm_wrapper.py |  335 -----
 .../NAGIOS/package/files/nagios_alerts.php      |  515 -------
 .../services/NAGIOS/package/files/sys_logger.py |  197 ---
 .../NAGIOS/package/scripts/functions.py         |   47 -
 .../services/NAGIOS/package/scripts/nagios.py   |  109 --
 .../NAGIOS/package/scripts/nagios_server.py     |  111 --
 .../package/scripts/nagios_server_config.py     |   99 --
 .../NAGIOS/package/scripts/nagios_service.py    |  103 --
 .../services/NAGIOS/package/scripts/params.py   |  363 -----
 .../NAGIOS/package/scripts/status_params.py     |   29 -
 .../NAGIOS/package/templates/contacts.cfg.j2    |  109 --
 .../package/templates/hadoop-commands.cfg.j2    |  166 ---
 .../package/templates/hadoop-hostgroups.cfg.j2  |   33 -
 .../package/templates/hadoop-hosts.cfg.j2       |   53 -
 .../templates/hadoop-servicegroups.cfg.j2       |  128 --
 .../package/templates/hadoop-services.cfg.j2    |  869 -----------
 .../NAGIOS/package/templates/nagios.cfg.j2      | 1365 -----------------
 .../NAGIOS/package/templates/nagios.conf.j2     |   84 --
 .../services/NAGIOS/package/templates/nagios.j2 |  164 ---
 .../NAGIOS/package/templates/resource.cfg.j2    |   51 -
 .../stacks/HDP/2.0.6/services/stack_advisor.py  |    2 +-
 .../stacks/HDP/2.0/role_command_order.json      |   14 +-
 .../blueprints/multinode-default.json           |    8 -
 .../blueprints/singlenode-default.json          |    8 -
 .../HDP/2.1.GlusterFS/role_command_order.json   |   13 +-
 .../HDP/2.1/blueprints/multinode-default.json   |    8 -
 .../HDP/2.1/blueprints/singlenode-default.json  |    8 -
 .../stacks/HDP/2.1/role_command_order.json      |   14 +-
 .../stacks/HDP/2.1/services/NAGIOS/metainfo.xml |   40 -
 .../stacks/HDP/2.1/services/stack_advisor.py    |    2 +-
 .../stacks/HDP/2.2/role_command_order.json      |   14 +-
 .../server/agent/TestHeartbeatMonitor.java      |   85 +-
 .../server/api/services/AmbariMetaInfoTest.java |   29 +-
 .../ComponentSSLConfigurationTest.java          |   26 +-
 .../AmbariManagementControllerTest.java         |  240 +--
 .../ganglia/GangliaPropertyProviderTest.java    |  209 +--
 .../GangliaReportPropertyProviderTest.java      |   24 +-
 .../GSInstallerComponentProviderTest.java       |    9 +-
 .../GSInstallerHostComponentProviderTest.java   |    9 +-
 .../GSInstallerServiceProviderTest.java         |   41 +-
 .../internal/HttpPropertyProviderTest.java      |   49 +-
 .../controller/internal/RequestImplTest.java    |   10 +-
 .../nagios/NagiosPropertyProviderTest.java      |  584 --------
 .../server/metadata/RoleCommandOrderTest.java   |   44 +-
 .../ambari/server/metadata/RoleGraphTest.java   |   95 +-
 .../ambari/server/stack/StackManagerTest.java   |   51 +-
 .../server/stageplanner/TestStagePlanner.java   |    7 +-
 .../stacks/1.3.2/NAGIOS/test_mm_wrapper.py      |  549 -------
 .../stacks/1.3.2/NAGIOS/test_nagios_server.py   |  282 ----
 .../1.3.2/configs/default.hbasedecom.json       |   11 -
 .../python/stacks/1.3.2/configs/default.json    |   15 +-
 .../1.3.2/configs/default.non_gmetad_host.json  |   11 -
 .../stacks/1.3.2/configs/default_client.json    |   11 -
 .../python/stacks/1.3.2/configs/secured.json    |   28 +-
 .../stacks/1.3.2/configs/secured_client.json    |   36 +-
 .../1.3.2/configs/secured_no_jce_name.json      |   18 +-
 .../1.3.2/hooks/before-ANY/test_before_any.py   |    8 -
 .../stacks/2.0.6/NAGIOS/test_mm_wrapper.py      |  549 -------
 .../stacks/2.0.6/NAGIOS/test_nagios_server.py   |  315 ----
 .../2.0.6/configs/default.hbasedecom.json       |    3 -
 .../python/stacks/2.0.6/configs/default.json    |   15 +-
 .../2.0.6/configs/default.non_gmetad_host.json  |    3 -
 .../stacks/2.0.6/configs/default_client.json    |   11 -
 .../python/stacks/2.0.6/configs/flume_22.json   |   15 +-
 .../stacks/2.0.6/configs/flume_target.json      |    8 -
 .../python/stacks/2.0.6/configs/ha_default.json |   14 +-
 .../python/stacks/2.0.6/configs/ha_secured.json |   14 +-
 .../python/stacks/2.0.6/configs/secured.json    |   13 -
 .../stacks/2.0.6/configs/secured_client.json    |   13 -
 .../2.0.6/configs/secured_no_jce_name.json      |   22 +-
 .../2.0.6/hooks/before-ANY/test_before_any.py   |    8 -
 .../test/python/stacks/2.1/common/services.json |   81 +-
 .../stacks/2.1/common/test_stack_advisor.py     |    2 +-
 .../test/python/stacks/2.1/configs/default.json |   16 -
 .../test/python/stacks/2.1/configs/secured.json |   13 -
 .../python/stacks/2.2/common/1/services.json    |   81 +-
 .../python/stacks/2.2/common/2/services.json    |   84 +-
 .../multinode-default.json                      |    2 +-
 .../resources/api_testscripts/curl-addnagios.sh |   21 -
 .../curl-setup-multiple-hbase-master.sh         |    9 +-
 ambari-server/src/test/resources/deploy_HDP2.sh |    2 +-
 .../src/test/resources/gsInstaller-hosts.txt    |    1 -
 .../src/test/resources/nagios_alerts.txt        |  605 --------
 .../HDP/1.2.0/services/NAGIOS/metainfo.xml      |   95 --
 .../services/NAGIOS/configuration/global.xml    |   50 -
 .../HDP/1.3.0/services/NAGIOS/metainfo.xml      |   95 --
 .../HDP/1.3.1/services/NAGIOS/metainfo.xml      |   95 --
 .../HDP/1.3.4/services/NAGIOS/metainfo.xml      |   94 --
 .../HDP/2.0.1/services/NAGIOS/metainfo.xml      |   90 --
 .../HDP/2.0.5/services/NAGIOS/metainfo.xml      |   89 --
 .../stacks/HDP/2.0.6/role_command_order.json    |   14 +-
 .../HDP/2.0.6/services/NAGIOS/metainfo.xml      |  139 --
 .../stacks/HDP/2.0.7/role_command_order.json    |   14 +-
 .../HDP/2.0.7/services/NAGIOS/metainfo.xml      |  136 --
 .../stacks/HDP/2.0.8/role_command_order.json    |   14 +-
 .../HDP/2.0.8/services/FAKENAGIOS/metainfo.xml  |   51 +
 .../stacks/HDP/2.1.1/role_command_order.json    |   14 +-
 .../stacks/OTHER/1.0/role_command_order.json    |   14 +-
 .../OTHER/1.0/role_command_order.json           |   14 +-
 ambari-server/src/test/resources/test_api.sh    |    6 -
 .../src/test/resources/test_multnode_api.sh     |    6 -
 .../nagios/conf.d/hdp_mon_nagios_addons.conf    |    7 -
 .../addOns/nagios/plugins/check_aggregate.php   |  195 ---
 .../src/addOns/nagios/plugins/check_hadoop.sh   |   96 --
 .../src/addOns/nagios/plugins/check_hbase.sh    |   91 --
 .../addOns/nagios/plugins/check_hdfs_blocks.php |   72 -
 .../nagios/plugins/check_hdfs_capacity.php      |   68 -
 .../plugins/check_hive_metastore_status.sh      |   32 -
 .../nagios/plugins/check_name_dir_status.php    |   59 -
 .../addOns/nagios/plugins/check_oozie_status.sh |   35 -
 .../nagios/plugins/check_rpcq_latency.php       |   67 -
 .../src/addOns/nagios/plugins/check_webui.sh    |   73 -
 .../src/addOns/nagios/plugins/sys_logger.py     |  197 ---
 .../src/addOns/nagios/scripts/nagios_alerts.php |  513 -------
 253 files changed, 495 insertions(+), 26373 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/custom_action_definitions/system_action_definitions.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_action_definitions/system_action_definitions.xml b/ambari-server/src/main/resources/custom_action_definitions/system_action_definitions.xml
index c65a496..cd5d5f7 100644
--- a/ambari-server/src/main/resources/custom_action_definitions/system_action_definitions.xml
+++ b/ambari-server/src/main/resources/custom_action_definitions/system_action_definitions.xml
@@ -20,16 +20,6 @@
 
 <actionDefinitions>
   <actionDefinition>
-    <actionName>nagios_update_ignore</actionName>
-    <actionType>SYSTEM</actionType>
-    <inputs>[nagios_ignore]</inputs>
-    <targetService>NAGIOS</targetService>
-    <targetComponent>NAGIOS_SERVER</targetComponent>
-    <defaultTimeout>60</defaultTimeout>
-    <description>Used to create an alert blackout</description>
-    <targetType>ANY</targetType>
-  </actionDefinition>
-  <actionDefinition>
     <actionName>check_host</actionName>
     <actionType>SYSTEM</actionType>
     <inputs></inputs>
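
With the nagios_update_ignore definition gone, the server no longer accepts requests naming that action. Custom actions of this kind are triggered through the request API; a minimal sketch of such an invocation follows (the endpoint shape matches Ambari's usual REST conventions, but the exact body here is illustrative and not taken from this commit):

import json
import urllib2

def run_custom_action(base_url, cluster, action, parameters):
    # POST a request naming a registered custom action; an unregistered
    # action name (e.g. the removed nagios_update_ignore) is rejected.
    body = json.dumps({
        'RequestInfo': {
            'action': action,
            'context': action,
            'parameters': parameters,
        },
    })
    url = '%s/api/v1/clusters/%s/requests' % (base_url, cluster)
    req = urllib2.Request(url, body)
    req.add_header('X-Requested-By', 'ambari')
    return urllib2.urlopen(req)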

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/custom_actions/nagios_update_ignore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/nagios_update_ignore.py b/ambari-server/src/main/resources/custom_actions/nagios_update_ignore.py
deleted file mode 100644
index b7026e2..0000000
--- a/ambari-server/src/main/resources/custom_actions/nagios_update_ignore.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import json
-import sys
-#import traceback
-from resource_management import *
-
-
-class NagiosIgnore(Script):
-  def actionexecute(self, env):
-    config = Script.get_config()
-
-    ignores = None
-
-    structured_output_example = {
-      'result': 'Ignore table updated.'
-    }
-
-    try:
-      if (config.has_key('passiveInfo')):
-        ignores = config['passiveInfo']
-      else:
-        structured_output_example['result'] = "Key 'passiveInfo' not found, skipping"
-        Logger.info("Key 'passiveInfo' was not found, skipping")
-        self.put_structured_out(structured_output_example)
-    except Exception:
-      structured_output_example['result'] = "Error accessing passiveInfo"
-      self.put_structured_out(structured_output_example)
-      Logger.debug("Error accessing passiveInfo")
-      return
-
-    if ignores is None:
-      Logger.info("Nothing to do - maintenance info was not provided")
-      return
-    
-    new_file_entries = []
-
-    if ignores is not None:
-      for define in ignores:
-        try:
-          host = str(define['host'])
-          service = str(define['service'])
-          component = str(define['component'])
-          key = host + " " + service + " " + component
-          Logger.info("found entry for host=" + host +
-            ", service=" + service +
-            ", component=" + component)
-
-          new_file_entries.append(key)
-        except KeyError:
-          Logger.debug("Could not load host, service, or component for " + str(define))
-          pass
-
-    writeFile(new_file_entries)
-
-    self.put_structured_out(structured_output_example)
-
-def writeFile(entries):
-  buf = ""
-  for entry in entries:
-    buf += entry + "\n"
-
-  f = None
-  try:
-    f = open('/var/nagios/ignore.dat', 'w')
-    f.write(buf)
-    if 0 == len(entries):
-      Logger.info("Cleared all entries from '/var/nagios/ignore.dat'")
-    elif 1 == len(entries):
-      Logger.info("Persisted '/var/nagios/ignore.dat' with 1 entry")
-    else:
-      Logger.info("Persisted '/var/nagios/ignore.dat' with " + str(len(entries)) + " entries")
-  except:
-    Logger.info("Could not open '/var/nagios/ignore.dat' to update")
-    pass
-  finally:
-    if f is not None:
-      f.close()
-
-if __name__ == "__main__":
-  NagiosIgnore().execute()
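
For readers skimming the deleted action above: its whole job was to take the passiveInfo maintenance list from the command JSON and persist one "host service component" line per entry to /var/nagios/ignore.dat. The same core, rewritten idiomatically for readability (dict formatting instead of repeated str() concatenation, a context manager instead of manual close) — a sketch only, not code from the repository:

def write_ignore_file(passive_info, path='/var/nagios/ignore.dat'):
    entries = []
    for define in passive_info:
        try:
            # Raises KeyError when host/service/component is missing,
            # which the original logged and skipped.
            entries.append('%(host)s %(service)s %(component)s' % define)
        except KeyError:
            continue
    with open(path, 'w') as f:
        for entry in entries:
            f.write(entry + '\n')
    return entries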

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/custom_actions/validate_configs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/validate_configs.py b/ambari-server/src/main/resources/custom_actions/validate_configs.py
index c245dbb..4529539 100644
--- a/ambari-server/src/main/resources/custom_actions/validate_configs.py
+++ b/ambari-server/src/main/resources/custom_actions/validate_configs.py
@@ -134,7 +134,6 @@ PROPERTIES_TO_CHECK = {
   "HCAT": {
     "hive-env": ["hcat_log_dir", "hcat_pid_dir"]
   },
-  #NAGIOS - no directories to check
   #OOZIE
   "OOZIE_SERVER": {
     "oozie-env": ["oozie_data_dir", "oozie_log_dir", "oozie_pid_dir"]
@@ -301,12 +300,6 @@ USERS_TO_GROUP_MAPPING = {
       "hive_user": "hive_user"
     }
   },
-  #NAGIOS
-  "NAGIOS_SERVER": {
-    "nagios-env": {
-      "nagios_user": "nagios_group"
-    }
-  },
   #OOZIE
   "OOZIE_SERVER": {
     "oozie-env": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/properties.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/properties.json b/ambari-server/src/main/resources/properties.json
index 36cff96..ad59922 100644
--- a/ambari-server/src/main/resources/properties.json
+++ b/ambari-server/src/main/resources/properties.json
@@ -72,7 +72,6 @@
         "HostRoles/desired_stack_id",
         "HostRoles/actual_configs",
         "params/run_smoke_test",
-        "HostRoles/nagios_alerts",
         "HostRoles/stale_configs",
         "HostRoles/desired_admin_state",
         "HostRoles/maintenance_state",

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/role_command_order.json b/ambari-server/src/main/resources/role_command_order.json
index c45ba07..1404ef6 100644
--- a/ambari-server/src/main/resources/role_command_order.json
+++ b/ambari-server/src/main/resources/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
@@ -13,12 +11,6 @@
     "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
@@ -48,8 +40,7 @@
     "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
     "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
     "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
-    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
-    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
     "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
   },
   "_comment" : "GLUSTERFS-specific dependencies",
@@ -71,8 +62,6 @@
     "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -93,7 +82,6 @@
   "namenode_optional_ha": {
     "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
     "ZKFC-START": ["NAMENODE-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/blueprints/multinode-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/blueprints/multinode-default.json b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/blueprints/multinode-default.json
index 642fcfa..de4be19 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/blueprints/multinode-default.json
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/blueprints/multinode-default.json
@@ -1,10 +1,5 @@
 {
     "configurations" : [
-        {
-            "nagios-env" : {
-                "nagios_contact" : "admin@localhost"
-            }
-        }
     ],
     "host_groups" : [
         {
@@ -135,9 +130,6 @@
                     "name" : "AMBARI_SERVER"
                 },
                 {
-                    "name" : "NAGIOS_SERVER"
-                },
-                {
                     "name" : "ZOOKEEPER_CLIENT"
                 },
                 {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/blueprints/singlenode-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/blueprints/singlenode-default.json b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/blueprints/singlenode-default.json
index 3c769dd..c6b916b 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/blueprints/singlenode-default.json
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/blueprints/singlenode-default.json
@@ -1,10 +1,5 @@
 {
     "configurations" : [
-        {
-            "nagios-env" : {
-                "nagios_contact" : "admin@localhost"
-            }
-        }
     ],
     "host_groups" : [
         {
@@ -86,9 +81,6 @@
                     "name" : "FALCON_CLIENT"
                 },
                 {
-                    "name" : "NAGIOS_SERVER"
-                },
-                {
                     "name" : "SECONDARY_NAMENODE"
                 },
                 {
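
Both blueprint edits above are the same mechanical change: remove the nagios-env block from "configurations" and the NAGIOS_SERVER component from the relevant host group. Expressed as a helper over the blueprint structure shown (an illustrative sketch, not shipped code):

def strip_component(blueprint, component, config_type):
    blueprint['configurations'] = [
        block for block in blueprint.get('configurations', [])
        if config_type not in block
    ]
    for group in blueprint.get('host_groups', []):
        group['components'] = [
            comp for comp in group.get('components', [])
            if comp.get('name') != component
        ]
    return blueprint

# strip_component(blueprint, 'NAGIOS_SERVER', 'nagios-env')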

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py
index 01789a7..38d3137 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py
@@ -28,7 +28,6 @@ tmp_dir = Script.get_tmp_dir()
 
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
-nagios_user = config['configurations']['nagios-env']['nagios_user']
 smoke_user =  config['configurations']['cluster-env']['smokeuser']
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
@@ -36,14 +35,12 @@ tez_user = config['configurations']['tez-env']["tez_user"]
 
 user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-nagios_group = config['configurations']['nagios-env']['nagios_group']
 
 #hosts
 hostname = config["hostname"]
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 rm_host = default("/clusterHostInfo/rm_host", [])
 slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
 hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
@@ -61,7 +58,6 @@ has_namenode = not len(namenode_host) == 0
 has_hs = not len(hs_host) == 0
 has_resourcemanager = not len(rm_host) == 0
 has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
 has_oozie_server = not len(oozie_servers)  == 0
 has_hcat_server_host = not len(hcat_server_hosts)  == 0
 has_hive_server_host = not len(hive_server_host)  == 0
@@ -111,8 +107,6 @@ if has_tez:
   user_to_groups_dict[tez_user] = [proxyuser_group]
 
 user_to_gid_dict = collections.defaultdict(lambda:user_group)
-if has_nagios:
-  user_to_gid_dict[nagios_user] = nagios_group
 
 user_list = json.loads(config['hostLevelParams']['user_list'])
 group_list = json.loads(config['hostLevelParams']['group_list'])
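
The interesting mechanics in this hook are the two defaultdicts: any user without an explicit override falls back to the cluster-wide user_group, so deleting the user_to_gid_dict[nagios_user] = nagios_group override (and the nagios-env lookups feeding it) is all it takes to stop the hook from creating the nagios user and group — exactly what test_before_any.py no longer asserts. A small sketch of that fallback behavior (the [user_group] default for user_to_groups_dict is assumed, since its definition sits outside this hunk):

import collections

user_group = 'hadoop'
user_to_gid_dict = collections.defaultdict(lambda: user_group)
user_to_groups_dict = collections.defaultdict(lambda: [user_group])

# Overrides like the removed nagios entry change the answer per user, e.g.:
user_to_groups_dict['oozie'] = ['nobody']

for user in ['hive', 'oozie', 'ambari-qa']:
    # The hook declares User(user, gid=..., groups=..., ignore_failures=False)
    print('%s -> gid=%s groups=%s'
          % (user, user_to_gid_dict[user], user_to_groups_dict[user]))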

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py
index ac8f1c8..ebcaaec 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py
@@ -38,7 +38,6 @@ hostname = config["hostname"]
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 rm_host = default("/clusterHostInfo/rm_host", [])
 slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
 hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
@@ -52,7 +51,6 @@ ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
 has_namenode = not len(namenode_host) == 0
 has_resourcemanager = not len(rm_host) == 0
 has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
 has_oozie_server = not len(oozie_servers)  == 0
 has_hcat_server_host = not len(hcat_server_hosts)  == 0
 has_hive_server_host = not len(hive_server_host)  == 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
index 69fcdac..25611d1 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
@@ -13,12 +11,6 @@
     "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START", "POSTGRESQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "ZOOKEEPER_SERVER-START", "NODEMANAGER-START", "RESOURCEMANAGER-START",
-        "MYSQL_SERVER-START", "POSTGRESQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
     "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
@@ -47,8 +39,6 @@
     "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -65,8 +55,7 @@
   "_comment" : "Dependencies that are used in HA NameNode cluster",
   "namenode_optional_ha": {
     "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"]
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",
   "resourcemanager_optional_ha" : {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/files/gmondLib.sh
index e7ea83f..d06afd8 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/files/gmondLib.sh
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/files/gmondLib.sh
@@ -160,7 +160,6 @@ host {
  *
  * At the very least, every gmond must expose its XML state to 
  * queriers from localhost.
- * Also we use this port for Nagios monitoring
  */
 tcp_accept_channel {
   bind = 0.0.0.0

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
index 83b2ed9..0946d84 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
@@ -42,7 +42,6 @@ kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/
 hostname = config["hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
 slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
 hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
@@ -63,7 +62,6 @@ has_resourcemanager = not len(rm_host) == 0
 has_histroryserver = not len(hs_host) == 0
 has_hbase_masters = not len(hbase_master_hosts) == 0
 has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
 has_oozie_server = not len(oozie_servers)  == 0
 has_hcat_server_host = not len(hcat_server_hosts)  == 0
 has_hive_server_host = not len(hive_server_host)  == 0
@@ -85,7 +83,6 @@ if has_ganglia_server:
 #users and groups
 yarn_user = config['configurations']['yarn-env']['yarn_user']
 hbase_user = config['configurations']['hbase-env']['hbase_user']
-nagios_user = config['configurations']['nagios-env']['nagios_user']
 oozie_user = config['configurations']['oozie-env']['oozie_user']
 webhcat_user = config['configurations']['hive-env']['hcat_user']
 hcat_user = config['configurations']['hive-env']['hcat_user']
@@ -97,7 +94,6 @@ hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_nam
 
 user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
-nagios_group = config['configurations']['nagios-env']['nagios_group']
 
 #hadoop params
 hadoop_conf_dir = "/etc/hadoop/conf"
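
These params.py edits all hinge on the default() helper: it resolves a '/'-separated path into the command JSON and returns the fallback when any segment is absent, so lookups for the now-unpublished nagios_server_host key would quietly return [] — the removal here is cleanup rather than a behavior fix. A paraphrase of that resolution (illustrative; resource_management's real helper reads the command config implicitly rather than taking it as a parameter):

def default(path, fallback, config):
    node = config
    for key in path.strip('/').split('/'):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

# default('/clusterHostInfo/nagios_server_host', [], config) -> []
# once the key no longer appears in the command JSON.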

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/configuration/nagios-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/configuration/nagios-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/configuration/nagios-env.xml
deleted file mode 100644
index fad8374..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/configuration/nagios-env.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>nagios_user</name>
-    <value>nagios</value>
-    <property-type>USER</property-type>
-    <description>Nagios Username.</description>
-  </property>
-  <property>
-    <name>nagios_group</name>
-    <value>nagios</value>
-    <property-type>GROUP</property-type>
-    <description>Nagios Group.</description>
-  </property>
-  <property>
-    <name>nagios_web_login</name>
-    <value>nagiosadmin</value>
-    <description>Nagios web user.</description>
-  </property>
-  <property require-input = "true">
-    <name>nagios_web_password</name>
-    <value></value>
-    <property-type>PASSWORD</property-type>
-    <description>Nagios Admin Password.</description>
-  </property>
-  <property require-input = "true">
-    <name>nagios_contact</name>
-    <value></value>
-    <description>Hadoop Admin Email.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/metainfo.xml
deleted file mode 100644
index bebc7d6..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <displayName>Nagios</displayName>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-           <name>NAGIOS_SERVER</name>
-          <displayName>Nagios Server</displayName>
-           <category>MASTER</category>
-           <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>OOZIE/OOZIE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>HCATALOG/HCAT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-           <commandScript>
-             <script>scripts/nagios_server.py</script>
-             <scriptType>PYTHON</scriptType>
-             <timeout>600</timeout>
-           </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>perl</name>
-            </package>
-            <package>
-              <name>fping</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>nagios3</name>
-            </package>
-            <package>
-              <name>nagios3-common</name>
-            </package>
-            <package>
-              <name>nagios3-dbg</name>
-            </package>
-            <package>
-              <name>nagios3-doc</name>
-            </package>
-            <package>
-              <name>nagios-plugins-extra</name>
-            </package>
-            <package>
-              <name>php5-curl</name>
-            </package>
-            <package>
-              <name>libapache2-mod-php5</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11</osFamily>
-          <packages>
-            <package>
-              <name>php5*-json</name>
-            </package>
-            <package>
-              <name>apache2?mod_php*</name>
-            </package>
-            <package>
-              <name>php-curl</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5</osFamily>
-          <packages>
-            <package>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <configuration-dependencies>
-        <config-type>nagios-env</config-type>
-      </configuration-dependencies>
-      <monitoringService>true</monitoringService>
-    </service>
-  </services>
-</metainfo>

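Since metainfo.xml descriptors like the one removed above are plain XML, they are easy to audit with the standard library; a small sketch (the local file path is hypothetical):

import xml.etree.ElementTree as ET

# Hypothetical path to a saved copy of the deleted descriptor.
tree = ET.parse("metainfo.xml")

for service in tree.getroot().iter("service"):
    print("service:", service.findtext("name"))
    # List the packages declared for each OS family.
    for os_specific in service.iter("osSpecific"):
        family = os_specific.findtext("osFamily")
        names = [p.findtext("name") for p in os_specific.iter("package")]
        print("  %s: %s" % (family, ", ".join(names)))
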
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_aggregate.php
deleted file mode 100644
index 792b25b..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_aggregate.php
+++ /dev/null
@@ -1,248 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-define("PASSIVE_MODE_STR", "AMBARIPASSIVE=");
-
-  $options = getopt ("f:s:n:w:c:t:");
-  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $status_file=$options['f'];
-  $status_code=$options['s'];
-  $type=$options['t'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  if ($type == "service" && !array_key_exists('n', $options)) {
-    echo "Service description not provided -n option\n";
-    exit(3);
-  }
-  if ($type == "service") {
-    $service_name=$options['n'];
-    /* echo "DESC: " . $service_name . "\n"; */
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  $counts;
-  if ($type == "service") {
-    $counts=query_alert_count($status_file_content, $service_name, $status_code);
-  } else {
-    $counts=query_host_count($status_file_content, $status_code);
-  }
-
-  if ($counts['total'] == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($counts['actual']/$counts['total'])*100;
-  }
-  if ($percent >= $crit) {
-    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (1);
-  }
-  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-  exit(0);
-
-
-  # Functions
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content, $status_code) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $total_hosts = 0;
-    $hosts = 0;
-    foreach ($matches[0] as $object) {
-      $total_hosts++;
-      if (getParameter($object, "current_state") == $status_code) {
-        $hosts++;
-      }
-    }
-    $hostcounts_object['total'] = $total_hosts;
-    $hostcounts_object['actual'] = $hosts;
-    return $hostcounts_object;
-  }
-
-  /* Query Alert counts */
-  function query_alert_count ($status_file_content, $service_name, $status_code) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $alertcounts_objects = array ();
-    $total_alerts=0;
-    $alerts=0;
-    foreach ($matches[0] as $object) {
-      $long_out = getParameter($object, "long_plugin_output");
-      $skip_if_match=!strncmp($long_out, PASSIVE_MODE_STR, strlen(PASSIVE_MODE_STR));
-
-      if (getParameter($object, "service_description") == $service_name && !$skip_if_match) {
-        $total_alerts++;
-        if (getParameter($object, "current_state") >= $status_code) {
-          $alerts++;
-        }
-      }
-    }
-    $alertcounts_objects['total'] = $total_alerts;
-    $alertcounts_objects['actual'] = $alerts;
-    return $alertcounts_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "NAMENODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-        $pieces[0] = "HBASE";
-        break;
-      case "SYSTEM":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-      case "STORM":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-
-/* JSON document format */
-/*
-{
-  "programstatus":{
-    "last_command_check":"1327385743"
-  },
-  "hostcounts":{
-    "up_nodes":"",
-    "down_nodes":""
-  },
-  "hoststatus":[
-    {
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_state":"0",
-      "last_hard_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_check":"1327385564",
-      "current_attempt":"1",
-      "last_hard_state_change":"1327362079",
-      "last_time_up":"1327385574",
-      "last_time_down":"0",
-      "last_time_unreachable":"0",
-      "is_flapping":"0",
-      "last_check":"1327385574",
-      "servicestatus":[
-      ]
-    }
-  ],
-  "servicestatus":[
-    {
-      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
-      "service_description":"HDFS Current Load",
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_attempt":"1",
-      "current_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_hard_state_change":"1327362079",
-      "last_time_ok":"1327385479",
-      "last_time_warning":"0",
-      "last_time_unknown":"0",
-      "last_time_critical":"0",
-      "last_check":"1327385574",
-      "is_flapping":"0"
-    }
-  ]
-}
-*/
-
-?>

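Stripped of the status.dat parsing, check_aggregate.php reduces to the standard Nagios threshold convention: compute affected/total as a percentage and exit 0/1/2 for OK/WARNING/CRITICAL (3 for usage errors and UNKNOWN). A minimal Python rendering of that decision logic, with invented numbers:

import sys

def evaluate(total, affected, warn_pct, crit_pct):
    # Guard against division by zero when nothing matched at all.
    percent = (affected / float(total)) * 100 if total else 0
    msg = "total:<%d>, affected:<%d>" % (total, affected)
    if percent >= crit_pct:
        print("CRITICAL: " + msg)
        return 2  # Nagios CRITICAL
    if percent >= warn_pct:
        print("WARNING: " + msg)
        return 1  # Nagios WARNING
    print("OK: " + msg)
    return 0      # Nagios OK

if __name__ == "__main__":
    sys.exit(evaluate(total=10, affected=3, warn_pct=20, crit_pct=50))
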
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_ambari_alerts.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_ambari_alerts.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_ambari_alerts.py
deleted file mode 100644
index 833a798..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_ambari_alerts.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-import os
-import optparse
-import json
-import traceback
-
-def main():
-
-  parser = optparse.OptionParser()
-
-  parser.add_option("-H", "--host", dest="host", default="localhost", help="NameNode host")
-  parser.add_option("-n", "--name", dest="alert_name", help="Alert name to check")
-  parser.add_option("-f", "--file", dest="alert_file", help="File containing the alert structure")
-
-  (options, args) = parser.parse_args()
-
-  if options.alert_name is None:
-    print "Alert name is required (--name or -n)"
-    exit(-1)
-
-  if options.alert_file is None:
-    print "Alert file is required (--file or -f)"
-    exit(-1)
-
-  if not os.path.exists(options.alert_file):
-    print "Status is unreported"
-    exit(3)
-
-  try:
-    with open(options.alert_file, 'r') as f:
-      data = json.load(f)
-
-      buf_list = []
-      exit_code = 0
-
-      for_hosts = data[options.alert_name]
-      if for_hosts.has_key(options.host):
-        for host_entry in for_hosts[options.host]:
-          buf_list.append(host_entry['text'])
-          alert_state = host_entry['state']
-          if alert_state == 'CRITICAL' and exit_code < 2:
-            exit_code = 2
-          elif alert_state == 'WARNING' and exit_code < 1:
-            exit_code = 1
-
-      if 0 == len(buf_list):
-        print "Status is not reported"
-        exit(3)
-      else:
-        print ", ".join(buf_list)
-        exit(exit_code)
-      
-  except Exception:
-    traceback.print_exc()
-    exit(3)
-
-if __name__ == "__main__":
-  main()
-

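For reference, the alert file this plugin read maps an alert name to per-host lists of {state, text} entries; the sample below is invented to show the expected shape, and the invocation in the trailing comment is hypothetical:

import json

# Invented sample of the structure the plugin expected:
# alert name -> host -> list of {state, text} entries.
sample = {
    "namenode_process": {
        "c6401.ambari.apache.org": [
            {"state": "CRITICAL",
             "text": "Connection refused on port 50070"}
        ]
    }
}

with open("alerts.json", "w") as f:
    json.dump(sample, f)

# Hypothetical invocation against this file:
#   check_ambari_alerts.py -H c6401.ambari.apache.org \
#       -n namenode_process -f alerts.json
# would print the text above and exit 2 (CRITICAL).
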
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_checkpoint_time.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_checkpoint_time.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_checkpoint_time.py
deleted file mode 100644
index ab889d1..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_checkpoint_time.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-import os
-import optparse
-import time
-import urllib2
-import json
-
-CRIT_MESSAGE = "CRITICAL: Last checkpoint time is below acceptable. Checkpoint was done {h}h. {m}m. ago"
-WARNING_MESSAGE = "WARNING: Last checkpoint time is below acceptable. Checkpoint was done {h}h. {m}m. ago"
-OK_MESSAGE = "OK: Last checkpoint time"
-WARNING_JMX_MESSAGE = "WARNING: NameNode JMX not accessible"
-
-def main():
-
-  current_time = int(round(time.time() * 1000))
-
-  parser = optparse.OptionParser()
-
-  parser.add_option("-H", "--host", dest="host",
-                    default="localhost", help="NameNode host")
-  parser.add_option("-p", "--port", dest="port",
-                    default="50070", help="NameNode jmx port")
-  parser.add_option("-w", "--warning", dest="warning",
-                    default="200", help="Percent for warning alert")
-  parser.add_option("-c", "--critical", dest="crit",
-                    default="200", help="Percent for critical alert")
-  parser.add_option("-t", "--period", dest="period",
-                    default="21600", help="Period time")
-  parser.add_option("-x", "--txns", dest="txns",
-                    default="1000000",
-                    help="CheckpointNode will create a checkpoint of the namespace every 'dfs.namenode.checkpoint.txns'")
-  (options, args) = parser.parse_args()
-
-  host = get_available_nn_host(options)
-
-  last_checkpoint_time_qry = "http://{host}:{port}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem".\
-    format(host=host, port=options.port)
-  last_checkpoint_time = int(get_value_from_jmx(last_checkpoint_time_qry,"LastCheckpointTime"))
-
-  journal_transaction_info_qry = "http://{host}:{port}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".\
-    format(host=host, port=options.port)
-  journal_transaction_info = get_value_from_jmx(journal_transaction_info_qry,"JournalTransactionInfo")
-  journal_transaction_info_dict = json.loads(journal_transaction_info)
-
-  last_txid = int(journal_transaction_info_dict['LastAppliedOrWrittenTxId'])
-  most_txid = int(journal_transaction_info_dict['MostRecentCheckpointTxId'])
-
-  delta = (current_time - last_checkpoint_time)/1000
-
-  if ((last_txid - most_txid) > int(options.txns)) and (float(delta) / int(options.period)*100 >= int(options.crit)):
-    print CRIT_MESSAGE.format(h=get_time(delta)['h'], m=get_time(delta)['m'])
-    exit(2)
-  elif ((last_txid - most_txid) > int(options.txns)) and (float(delta) / int(options.period)*100 >= int(options.warning)):
-    print WARNING_MESSAGE.format(h=get_time(delta)['h'], m=get_time(delta)['m'])
-    exit(1)
-  else:
-    print OK_MESSAGE
-    exit(0)
-
-def get_time(delta):
-  h = int(delta/3600)
-  m = int((delta % 3600)/60)
-  return {'h':h, 'm':m}
-
-def get_value_from_jmx(qry, property):
-  try:
-    response = urllib2.urlopen(qry)
-    data=response.read()
-  except Exception:
-    print WARNING_JMX_MESSAGE
-    exit(1)
-
-  data_dict = json.loads(data)
-  return (data_dict["beans"][0][property])
-
-def get_available_nn_host(options):
-  nn_hosts = options.host.split(" ")
-  for nn_host in nn_hosts:
-    try:
-      urllib2.urlopen("http://{host}:{port}/jmx".format(host=nn_host, port=options.port))
-      return nn_host
-    except Exception:
-      pass
-  print WARNING_JMX_MESSAGE
-  exit(1)
-
-if __name__ == "__main__":
-  main()
-
-
-

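The severity decision above fires only when two conditions hold at once: the uncheckpointed transaction count exceeds the -x limit, and the time since the last checkpoint, expressed as a percentage of the -t period, crosses the warn/crit threshold. A compact sketch of that predicate (all numbers invented):

def checkpoint_severity(delta_seconds, period, pct_warn, pct_crit,
                        last_txid, last_ckpt_txid, txn_limit):
    # Elapsed time since the last checkpoint as a percent of the
    # configured checkpoint period (e.g. 21600 s = 6 h).
    elapsed_pct = float(delta_seconds) / period * 100
    txns_pending = last_txid - last_ckpt_txid
    if txns_pending > txn_limit and elapsed_pct >= pct_crit:
        return 2  # CRITICAL
    if txns_pending > txn_limit and elapsed_pct >= pct_warn:
        return 1  # WARNING
    return 0      # OK

# Example: 14 h since the last checkpoint against a 6 h period
# (233% >= 200%), with 1.2M transactions pending against a 1M limit.
print(checkpoint_severity(14 * 3600, 21600, 200, 200,
                          2200000, 1000000, 1000000))
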
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_cpu.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_cpu.php b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_cpu.php
deleted file mode 100644
index 0744e38..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_cpu.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:k:r:t:u:e");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=java.lang:type=OperatingSystem",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-
-    $object = $json_array['beans'][0];
-
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }
-
-    $cpu_load = $object['SystemCpuLoad'];
-
-    if (!isset($object['SystemCpuLoad']) || $cpu_load < 0.0) {
-      echo "WARNING: Data unavailable, SystemCpuLoad is not set\n";
-      exit(1);
-    }
-
-    $cpu_count = $object['AvailableProcessors'];
-
-    $cpu_percent = $cpu_load*100;
-  }
-
-  $out_msg = $cpu_count . " CPU, load " . number_format($cpu_percent, 1, '.', '') . '%';
-
-  if ($cpu_percent > $crit) {
-    echo $out_msg . ' > ' . $crit . "% : CRITICAL\n";
-    exit(2);
-  }
-  if ($cpu_percent > $warn) {
-    echo $out_msg . ' > ' . $warn . "% : WARNING\n";
-    exit(1);
-  }
-
-  echo $out_msg . ' < ' . $warn . "% : OK\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab_path -r principal_name -t kinit_path -u security_enabled -e ssl_enabled\n";
-  }
-?>

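check_cpu.php ultimately reads the java.lang:type=OperatingSystem bean from the Hadoop /jmx servlet and scales SystemCpuLoad (a 0.0-1.0 fraction) to a percentage. A rough Python 3 equivalent of the fetch-and-threshold round trip, for illustration only (host, port, and thresholds are placeholders, and no kinit/SPNEGO handling is attempted):

import json
import urllib.request

HOST, PORT = "c6401.ambari.apache.org", 50070  # placeholders
WARN, CRIT = 200.0, 250.0                      # percent thresholds

url = "http://%s:%d/jmx?qry=java.lang:type=OperatingSystem" % (HOST, PORT)
bean = json.load(urllib.request.urlopen(url))["beans"][0]

# SystemCpuLoad is reported as a 0.0-1.0 fraction; scale to percent.
cpu_percent = bean["SystemCpuLoad"] * 100
cpu_count = bean["AvailableProcessors"]

msg = "%d CPU, load %.1f%%" % (cpu_count, cpu_percent)
if cpu_percent > CRIT:
    print(msg + " > %.0f%% : CRITICAL" % CRIT)
elif cpu_percent > WARN:
    print(msg + " > %.0f%% : WARNING" % WARN)
else:
    print(msg + " < %.0f%% : OK" % WARN)
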
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_cpu.pl b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_cpu.pl
deleted file mode 100644
index a5680f7..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_cpu.pl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/perl -w 
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-use strict;
-use Net::SNMP;
-use Getopt::Long;
-
-# Variable
-my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
-my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
-my $o_host = 	undef;
-my $o_community = undef;
-my $o_warn=	undef;
-my $o_crit=	undef;
-my $o_timeout = 15;
-my $o_port = 161;
-
-sub Usage {
-    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
-}
-
-Getopt::Long::Configure ("bundling");
-GetOptions(
-  'H:s'   => \$o_host,	
-  'C:s'   => \$o_community,	
-  'c:s'   => \$o_crit,        
-  'w:s'   => \$o_warn
-          );
-if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
-  Usage();
-  exit 3;
-}
-$o_warn =~ s/\%//g; 
-$o_crit =~ s/\%//g;
-alarm ($o_timeout);
-$SIG{'ALRM'} = sub {
- print "Unable to contact host: $o_host\n";
- exit 3;
-};
-
-# Connect to host
-my ($session,$error);
-($session, $error) = Net::SNMP->session(
-		-hostname  => $o_host,
-		-community => $o_community,
-		-port      => $o_port,
-		-timeout   => $o_timeout
-	  );
-if (!defined($session)) {
-   printf("Error opening session: %s.\n", $error);
-   exit 3;
-}
-
-my $exit_val=undef;
-my $resultat =  (Net::SNMP->VERSION < 4) ?
-	  $session->get_table($base_proc)
-	: $session->get_table(Baseoid => $base_proc);
-
-if (!defined($resultat)) {
-   printf("ERROR: Description table : %s.\n", $session->error);
-   $session->close;
-   exit 3;
-}
-
-$session->close;
-
-my ($cpu_used,$ncpu)=(0,0);
-foreach my $key ( keys %$resultat) {
-  if ($key =~ /$proc_load/) {
-    $cpu_used += $$resultat{$key};
-    $ncpu++;
-  }
-}
-
-if ($ncpu==0) {
-  print "Can't find CPU usage information : UNKNOWN\n";
-  exit 3;
-}
-
-$cpu_used /= $ncpu;
-
-print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
-printf(" %.1f%%",$cpu_used);
-$exit_val=0;
-
-if ($cpu_used > $o_crit) {
- print " > $o_crit% : CRITICAL\n";
- $exit_val=2;
-} else {
-  if ($cpu_used > $o_warn) {
-   print " > $o_warn% : WARNING\n";
-   $exit_val=1;
-  }
-}
-print " < $o_warn% : OK\n" if ($exit_val eq 0);
-exit $exit_val;

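The Perl variant takes a different route: it walks hrProcessorLoad (1.3.6.1.2.1.25.3.3.1.2) over SNMP, one row per processor, and averages the values. The aggregation step, separated from the SNMP transport, looks roughly like this (the walk result is fabricated):

# Fabricated result of an SNMP walk of 1.3.6.1.2.1.25.3.3.1.2:
# one hrProcessorLoad value (0-100) per CPU row.
walk_result = {
    "1.3.6.1.2.1.25.3.3.1.2.768": 42,
    "1.3.6.1.2.1.25.3.3.1.2.769": 58,
}

PROC_LOAD_OID = "1.3.6.1.2.1.25.3.3.1.2"
loads = [v for k, v in walk_result.items() if k.startswith(PROC_LOAD_OID)]

if not loads:
    print("Can't find CPU usage information : UNKNOWN")  # exit 3 in the plugin
else:
    avg = sum(loads) / float(len(loads))
    print("%d CPU, average load %.1f%%" % (len(loads), avg))
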
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_cpu_ha.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_cpu_ha.php b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_cpu_ha.php
deleted file mode 100644
index 91a7c64..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_cpu_ha.php
+++ /dev/null
@@ -1,116 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:k:r:t:u:e");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  $jmx_response_available = false;
-  $jmx_response;
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=java.lang:type=OperatingSystem",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-
-    $object = $json_array['beans'][0];
-
-    if (count($object) > 0) {
-      $jmx_response_available = true;
-      $jmx_response = $object;
-    }
-  }
-
-  if ($jmx_response_available === false) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }
-
-  $cpu_load = $jmx_response['SystemCpuLoad'];
-
-  if (!isset($jmx_response['SystemCpuLoad']) || $cpu_load < 0.0) {
-    echo "WARNING: Data unavailable, SystemCpuLoad is not set\n";
-    exit(1);
-  }
-
-  $cpu_count = $jmx_response['AvailableProcessors'];
-
-  $cpu_percent = $cpu_load*100;
-
-  $out_msg = $cpu_count . " CPU, load " . number_format($cpu_percent, 1, '.', '') . '%';
-
-  if ($cpu_percent > $crit) {
-    echo $out_msg . ' > ' . $crit . "% : CRITICAL\n";
-    exit(2);
-  }
-  if ($cpu_percent > $warn) {
-    echo $out_msg . ' > ' . $warn . "% : WARNING\n";
-    exit(1);
-  }
-
-  echo $out_msg . ' < ' . $warn . "% : OK\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab_path -r principal_name -t kinit_path -u security_enabled -e ssl_enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_datanode_storage.php b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_datanode_storage.php
deleted file mode 100644
index dee22b4..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_datanode_storage.php
+++ /dev/null
@@ -1,100 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the DataNode's JMX endpoint and checks
- * the storage capacity remaining on local DataNode storage.
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  $cap_remain = $object['Remaining']; /* Total capacity - any external files created in data directories by non-Hadoop apps */
-  $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }  
-  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
-
-  $out_msg = "Capacity:[" . $cap_total . 
-             "], Remaining Capacity:[" . $cap_remain . 
-             "], percent_full:[" . $percent_full  . "]";
-  
-  if ($percent_full > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent_full > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hdfs_blocks.php b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hdfs_blocks.php
deleted file mode 100644
index 3693aa0..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hdfs_blocks.php
+++ /dev/null
@@ -1,102 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the NameNode's JMX endpoint and checks
- * whether the corrupt or missing blocks % is > threshold
- * check_jmx -H hostaddress -p port -w 1% -c 1%
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:s:e:k:r:t:u:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $nn_jmx_property=$options['s'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $m_percent = 0;
-    $object = $json_array['beans'][0];
-    $missing_blocks = $object['MissingBlocks'];
-    $total_blocks = $object['BlocksTotal'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    if($total_blocks == 0) {
-      $m_percent = 0;
-    } else {
-      $m_percent = ($missing_blocks/$total_blocks)*100;
-      break;
-    }
-  }
-  $out_msg = "missing_blocks:<" . $missing_blocks .
-             ">, total_blocks:<" . $total_blocks . ">";
-
-  if ($m_percent > 0) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -s <namenode bean name> -k keytab path -r principal name -t kinit path -u security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hdfs_capacity.php b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hdfs_capacity.php
deleted file mode 100644
index af72723..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hdfs_capacity.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the NameNode's JMX endpoint and checks
- * whether the % HDFS capacity used is >= the warn and critical limits.
- * check_jmx -H hostaddress -p port -w 1 -c 1
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $percent = 0;
-    $object = $json_array['beans'][0];
-    $CapacityUsed = $object['CapacityUsed'];
-    $CapacityRemaining = $object['CapacityRemaining'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    $CapacityTotal = $CapacityUsed + $CapacityRemaining;
-    if($CapacityTotal == 0) {
-      $percent = 0;
-    } else {
-      $percent = ($CapacityUsed/$CapacityTotal)*100;
-      break;
-    }
-  }
-  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
-             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
-
-  if ($percent >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>


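Both capacity-style checks share the HA loop visible above: iterate the comma-separated NameNode hosts and break on the first one reporting a non-zero CapacityTotal, presumably the active NameNode. A sketch of that selection logic with invented bean data:

def pick_active_nn_capacity(per_host_beans):
    # per_host_beans is invented: one FSNamesystemState bean per
    # NameNode host, in the order the hosts were listed with -h.
    used = remaining = 0
    for bean in per_host_beans:
        used = bean.get("CapacityUsed", 0)
        remaining = bean.get("CapacityRemaining", 0)
        if used + remaining:  # non-zero total: stop at this host
            break
    total = used + remaining
    percent = (used / float(total)) * 100 if total else 0
    return total, percent

print(pick_active_nn_capacity([
    {"CapacityUsed": 0, "CapacityRemaining": 0},  # inactive/unreachable NN
    {"CapacityUsed": 40 * 2**30, "CapacityRemaining": 60 * 2**30},
]))
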
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-commands.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
deleted file mode 100644
index c1a792c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
+++ /dev/null
@@ -1,166 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-{% if check_cpu_on %}
-# 'check_cpu' checks remote CPU load
-define command {
-        command_name    check_cpu
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_cpu.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -u $ARG8$
-       }
-define command {
-        command_name    check_cpu_ha
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py or $ARG1$ -- php $USER1$/check_cpu_ha.php -h ^^ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -u $ARG9$
-       }
-{% endif %}
-
-# Check whether DataNode storage is full
-define command {
-        command_name    check_datanode_storage
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
-       }
-
-define command{
-        command_name    check_hdfs_blocks
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $ARG1$ -- php $USER1$/check_hdfs_blocks.php -h ^^ -p $ARG2$ -s $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -u $ARG8$
-       }
-
-define command{
-        command_name    check_hdfs_capacity
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $ARG1$ -- php $USER1$/check_hdfs_capacity.php -h ^^ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_aggregate
-        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
-       }
-
-define command{
-        command_name    check_rpcq_latency
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_rpcq_latency_ha
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py or $ARG1$ -- php $USER1$/check_rpcq_latency_ha.php -h ^^ -p $ARG3$ -n $ARG2$ -w $ARG4$ -c $ARG5$ -e $ARG6$ -k $ARG7$ -r $ARG8$ -t $ARG9$ -s $ARG10$
-       }
-
-define command{
-        command_name    check_nagios
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
-       }
-
-define command{
-        command_name    check_webui
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$
-       }
-
-define command{
-        command_name    check_webui_ha
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $ARG2$ -- $USER1$/check_webui_ha.sh $ARG1$ ^^ $ARG3$
-       }
-
-define command{
-        command_name    check_name_dir_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$ -e $ARG2$ -k $ARG3$ -r $ARG4$ -t $ARG5$ -s $ARG6$
-       }
-
-define command{
-        command_name    check_oozie_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_templeton_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_hive_metastore_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-define command{
-        command_name    check_hue_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_hue_status.sh
-       }
-
-define command{
-       command_name    check_mapred_local_dir_used_space
-       command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_mapred_local_dir_used.sh $ARG1$ $ARG2$
-       }
-
-define command{
-       command_name    check_namenodes_ha
-       command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_namenodes_ha.sh $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name    check_nodemanager_health
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_nodemanager_health.sh $HOSTADDRESS$ $ARG1$
-       }
-
-define command{
-        command_name    host_sys_logger
-        command_line    $USER1$/sys_logger.py $HOSTSTATETYPE$ $HOSTATTEMPT$ $HOSTSTATE$ "Host::Ping" "Event Host=$HOSTADDRESS$($HOSTSTATE$), $HOSTOUTPUT$ $LONGHOSTOUTPUT$"
-       }
-
-define command{
-        command_name    service_sys_logger
-        command_line    $USER1$/sys_logger.py $SERVICESTATETYPE$ $SERVICEATTEMPT$ $SERVICESTATE$ "$SERVICEDESC$" "Event Host=$HOSTADDRESS$ Service Description=$SERVICEDESC$($SERVICESTATE$), $SERVICEOUTPUT$ $LONGSERVICEOUTPUT$"
-       }
-
-define command{
-        command_name check_tcp_wrapper
-        command_line  /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $HOSTADDRESS$ -- $USER1$/check_tcp -H ^^ -p $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name check_checkpoint_time
-        command_line /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py or $ARG1$ -- /var/lib/ambari-agent/ambari-python-wrap $USER1$/check_checkpoint_time.py -H ^^ -p $ARG2$ -w $ARG3$ -c $ARG4$ -t $ARG5$ -x $ARG6$ -s $ARG7$
-       }
-
-define command{
-        command_name check_tcp_wrapper_sasl
-        command_line /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $HOSTADDRESS$ -- /var/lib/ambari-agent/ambari-python-wrap $USER1$/check_hive_thrift_port.py -H ^^ -p $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name check_ambari
-        command_line /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- /var/lib/ambari-agent/ambari-python-wrap $USER1$/check_ambari_alerts.py -H $HOSTADDRESS$ -f $ARG1$ -n $ARG2$
-       }
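
The command definitions above all funnel through mm_wrapper.py, whose first argument selects how per-host results are combined. Judging from the calling convention, the wrapper runs the wrapped plugin once per host, substituting each host name for the ^^ placeholder, with "or" meaning healthy if any host passes, "and" meaning healthy only if every host passes, and legacy_check_wrapper acting as a plain pass-through. A minimal Python sketch of that convention, under those assumptions (the host name and plugin path below are invented for illustration):

    import subprocess

    def run_wrapped_check(mode, hosts, command_template):
        if mode == 'legacy_check_wrapper':
            # No placeholder: run the wrapped command exactly once.
            return subprocess.call(command_template)
        # Substitute each host for the '^^' placeholder and collect the
        # Nagios exit codes (0=OK, 1=WARNING, 2=CRITICAL, 3=UNKNOWN).
        codes = [subprocess.call([host if tok == '^^' else tok
                                  for tok in command_template])
                 for host in hosts]
        # 'or': healthy if any host passes; 'and': every host must pass.
        return min(codes) if mode == 'or' else max(codes)

    # Roughly how check_tcp_wrapper!8670!-w 1 -c 1 would expand:
    run_wrapped_check('and', ['c6401.ambari.apache.org'],
                      ['/usr/lib64/nagios/plugins/check_tcp',
                       '-H', '^^', '-p', '8670', '-w', '1', '-c', '1'])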

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
deleted file mode 100644
index 05c1252..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
+++ /dev/null
@@ -1,33 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for name, hosts in hostgroup_defs.iteritems() %}
-{% if hosts %}
-define hostgroup {
-        hostgroup_name  {{name}}
-        alias           {{name}}
-        members         {{','.join(hosts)}}
-}
-{% endif %}
-{% endfor %}
-
-define hostgroup {
-        hostgroup_name  all-servers
-        alias           All Servers
-        members         {{','.join(all_hosts)}}
-}
\ No newline at end of file
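
The template above emits one hostgroup per populated entry of hostgroup_defs, plus an unconditional all-servers group; entries with an empty host list are skipped by the {% if hosts %} guard. A minimal rendering sketch (note that iteritems() is Python 2 only, so the sketch substitutes items(); the host names are invented):

    from jinja2 import Template

    hostgroup_tpl = Template('''
    {%- for name, hosts in hostgroup_defs.items() %}
    {%- if hosts %}
    define hostgroup {
            hostgroup_name  {{name}}
            alias           {{name}}
            members         {{','.join(hosts)}}
    }
    {%- endif %}
    {%- endfor %}''')

    print(hostgroup_tpl.render(hostgroup_defs={
        'namenode': ['c6401.ambari.apache.org'],
        'slaves': ['c6402.ambari.apache.org', 'c6403.ambari.apache.org'],
        'hue-server': [],  # empty, so no hostgroup is emitted for it
    }))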

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
deleted file mode 100644
index 8bcc980..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
+++ /dev/null
@@ -1,53 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-{% for host in all_hosts %}
-define host {
-        alias                     {{host}}
-        host_name                 {{host}}
-        use                       {{host_template}}
-        address                   {{host}}
-        check_command             check_tcp_wrapper!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
-        check_interval            0.25
-        retry_interval            0.25
-        max_check_attempts        4
-        notifications_enabled     1
-        first_notification_delay  0     # Send the notification soon after a hard state change
-        notification_interval     0     # Send the notification once
-        notification_options      d,u,r
-}
-
-{% endfor %}
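
Each rendered host block pairs all_hosts[i] positionally with all_ping_ports[i], so the check_tcp_wrapper command probes that host's Ambari agent ping port; loop.index is 1-based in Jinja2, hence the "- 1". A sketch of the pairing (hosts and ports invented):

    # Host i is checked on ping port i; zip() makes the pairing explicit.
    all_hosts = ['c6401.ambari.apache.org', 'c6402.ambari.apache.org']
    all_ping_ports = [8670, 8670]

    for host, port in zip(all_hosts, all_ping_ports):
        print('define host {\n'
              '        host_name      %s\n'
              '        check_command  check_tcp_wrapper!%s!-w 1 -c 1\n'
              '}' % (host, port))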

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
deleted file mode 100644
index a10fa80..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
+++ /dev/null
@@ -1,128 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-
-{% if hostgroup_defs['namenode'] or 
-  hostgroup_defs['snamenode']  or
-  hostgroup_defs['slaves'] %}
-  {% if hostgroup_defs['namenode'] != None %}
-  define servicegroup {
-    servicegroup_name  HDFS
-    alias  HDFS Checks
-  }
-  {% endif %}
-{% endif %} 
-{%if hostgroup_defs['jobtracker'] or
-  hostgroup_defs['historyserver2']-%}
-define servicegroup {
-  servicegroup_name  MAPREDUCE2
-  alias  MAPREDUCE Checks
-}
-{% endif %}
-{%if hostgroup_defs['resourcemanager'] or
-  hostgroup_defs['nodemanagers'] %}
-define servicegroup {
-  servicegroup_name  YARN
-  alias  YARN Checks
-}
-{% endif %}
-{%if hostgroup_defs['hbasemasters'] %}
-define servicegroup {
-  servicegroup_name  HBASE
-  alias  HBASE Checks
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-define servicegroup {
-  servicegroup_name  OOZIE
-  alias  OOZIE Checks
-}
-{% endif %}
-{% if hostgroup_defs['nagios-server'] %}
-define servicegroup {
-  servicegroup_name  NAGIOS
-  alias  NAGIOS Checks
-}
-{% endif %}
-{% if hostgroup_defs['ganglia-server'] %}
-define servicegroup {
-  servicegroup_name  GANGLIA
-  alias  GANGLIA Checks
-}
-{% endif %}
-{% if hostgroup_defs['hiveserver'] or hostgroup_defs['webhcat-server'] %}
-define servicegroup {
-  servicegroup_name  HIVE
-  alias  HIVE Checks
-}
-{% endif %}
-{% if hostgroup_defs['zookeeper-servers'] %}
-define servicegroup {
-  servicegroup_name  ZOOKEEPER
-  alias  ZOOKEEPER Checks
-}
-{% endif %}
-define servicegroup {
-  servicegroup_name  AMBARI
-  alias  AMBARI Checks
-}
-{% if hostgroup_defs['hue-server'] %}
-define servicegroup {
-  servicegroup_name  HUE
-  alias  HUE Checks
-}
-{% endif %}
-{% if hostgroup_defs['nimbus'] or
-  hostgroup_defs['drpc-server'] or
-  hostgroup_defs['storm_ui'] or
-  hostgroup_defs['supervisors'] or
-  hostgroup_defs['storm_rest_api']%}
-define servicegroup {
-  servicegroup_name  STORM
-  alias  STORM Checks
-}
-{% endif %}
-{% if hostgroup_defs['falcon-server'] %}
-define servicegroup {
-  servicegroup_name  FALCON
-  alias  FALCON Checks
-}
-{% endif %}
-
-{%if hostgroup_defs['flume-servers'] %}
-define servicegroup {
-  servicegroup_name  FLUME
-  alias  FLUME Checks
-}
-{% endif %}
-
-{%if hostgroup_defs['knox-gateway'] %}
-define servicegroup {
-  servicegroup_name  KNOX
-  alias  KNOX Checks
-}
-{% endif %}
-
-{%if hostgroup_defs['kafka-broker'] %}
-define servicegroup {
-  servicegroup_name  KAFKA
-  alias  KAFKA Checks
-}
-{% endif %}
-
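
A servicegroup in this template is defined only when at least one of its backing hostgroups is populated, AMBARI being the lone unconditional group. A rough decision table for a subset of the conditionals above (simplified: the template's HDFS branch additionally re-checks that 'namenode' is not None, and the single-hostgroup services such as OOZIE, GANGLIA, ZOOKEEPER, HUE, FALCON, FLUME, KNOX, and KAFKA are omitted for brevity):

    TRIGGERS = {
        'HDFS': ['namenode', 'snamenode', 'slaves'],
        'MAPREDUCE2': ['jobtracker', 'historyserver2'],
        'YARN': ['resourcemanager', 'nodemanagers'],
        'HBASE': ['hbasemasters'],
        'HIVE': ['hiveserver', 'webhcat-server'],
        'STORM': ['nimbus', 'drpc-server', 'storm_ui', 'supervisors',
                  'storm_rest_api'],
    }

    def active_servicegroups(hostgroup_defs):
        active = {'AMBARI'}  # defined unconditionally by the template
        for group, hostgroups in TRIGGERS.items():
            if any(hostgroup_defs.get(hg) for hg in hostgroups):
                active.add(group)
        return active

    # e.g. a YARN-only cluster yields {'AMBARI', 'YARN'}
    print(active_servicegroups({'resourcemanager': ['c6401'],
                                'nodemanagers': ['c6402', 'c6403']}))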

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-services.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-services.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-services.cfg.j2
deleted file mode 100644
index f07de3a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-services.cfg.j2
+++ /dev/null
@@ -1,869 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-{# TODO: Look for { or } in created file #}
-# NAGIOS SERVER Check (status log update)
-{% if hostgroup_defs['nagios-server'] %}
-define service {
-        name                            hadoop-service
-        use                             generic-service
-        notification_options            w,u,c,r,f,s
-        first_notification_delay        0
-        notification_interval           0                 # Send the notification once
-        contact_groups                  admins
-        notifications_enabled           1
-        event_handler_enabled           1
-        register                        0
-}
-
-define service {        
-        hostgroup_name          nagios-server        
-        use                     hadoop-service
-        service_description     NAGIOS::Nagios status log freshness
-        servicegroups           NAGIOS
-        check_command           check_nagios!10!/var/nagios/status.dat!{{nagios_lookup_daemon_str}}
-        normal_check_interval   5
-        retry_check_interval    0.5
-        max_check_attempts      2
-}
-
-# NAGIOS SERVER HDFS Checks
-{% if hostgroup_defs['namenode'] != None %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes with space available
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode space"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{# used only for HDP2 #}
-{% if hostgroup_defs['namenode'] and hostgroup_defs['namenode'] != None and dfs_ha_enabled %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::NameNode HA Healthy
-        servicegroups           HDFS
-        check_command           check_namenodes_ha!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}
-        _host_component         NAMENODE
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      5
-}
-{% endif %}
-
-# AMBARI AGENT Checks
-{% for hostname in all_hosts %}
-define service {
-        host_name	        {{ hostname }}
-        use                     hadoop-service
-        service_description     AMBARI::Ambari Agent process
-        servicegroups           AMBARI
-        check_command           check_tcp_wrapper!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% endfor %}
-
-# NAGIOS SERVER ZOOKEEPER Checks
-{% if hostgroup_defs['zookeeper-servers'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     ZOOKEEPER::Percent ZooKeeper Servers live
-        servicegroups           ZOOKEEPER
-        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process"!35%!70%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-# NAGIOS SERVER HBASE Checks
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASE::Percent RegionServers live
-        servicegroups           HBASE
-        check_command           check_aggregate!"REGIONSERVER::RegionServer process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-
-
-# GANGLIA SERVER Checks
-{% if hostgroup_defs['ganglia-server'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Server process
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_port }}!-w 1 -c 1
-        _host_component         GANGLIA_SERVER
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% if hostgroup_defs['namenode'] %}
-define service {
-        hostgroup_name	        ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for NameNode
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_collector_namenode_port }}!-w 1 -c 1
-        _host_component         GANGLIA_MONITOR
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name	        ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HBase Master
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_collector_hbase_port }}!-w 1 -c 1
-        _host_component         GANGLIA_MONITOR
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['resourcemanager'] %}
-define service {
-        hostgroup_name	        ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for ResourceManager
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_collector_rm_port }}!-w 1 -c 1
-        _host_component         GANGLIA_MONITOR
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-define service {
-        hostgroup_name	        ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HistoryServer
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_collector_hs_port }}!-w 1 -c 1
-        _host_component         GANGLIA_MONITOR
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-{% endif %}
-
-
-{% if hostgroup_defs['snamenode'] and hostgroup_defs['namenode'] != None %}
-# Secondary namenode checks
-define service {
-        hostgroup_name          snamenode
-        use                     hadoop-service
-        service_description     NAMENODE::Secondary NameNode process
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{ snamenode_port }}!-w 1 -c 1
-        _host_component         SECONDARY_NAMENODE
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['storm_ui'] %}
-# STORM UI Checks
-define service {
-        hostgroup_name          storm_ui
-        use                     hadoop-service
-        service_description     STORM_UI_SERVER::Storm UI on {{ hostgroup_defs['storm_ui'][0] }}
-        servicegroups           STORM
-        check_command           check_webui!storm_ui!{{ storm_ui_port }}
-        _host_component         STORM_UI_SERVER
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['storm_ui'] %}
-# STORM UI Checks
-define service {
-        hostgroup_name          storm_ui
-        use                     hadoop-service
-        service_description     STORM_UI_SERVER::Storm UI Server process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ storm_ui_port }}!-w 1 -c 1
-        _host_component         STORM_UI_SERVER
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['nimbus'] %}
-# Nimbus Checks
-define service {
-        hostgroup_name          nimbus
-        use                     hadoop-service
-        service_description     NIMBUS::Nimbus process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ nimbus_port }}!-w 1 -c 1
-        _host_component         NIMBUS
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['drpc-server'] %}
-# drpc Checks
-define service {
-        hostgroup_name          drpc-server
-        use                     hadoop-service
-        service_description     DRPC_SERVER::DRPC Server process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ drpc_port }}!-w 1 -c 1
-        _host_component         DRPC_SERVER
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['storm_rest_api'] %}
-# Storm REST API Checks
-define service {
-        hostgroup_name          storm_rest_api
-        use                     hadoop-service
-        service_description     STORM_REST_API::Storm REST API Server process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ storm_rest_api_port }}!-w 1 -c 1
-        _host_component         STORM_REST_API
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-# NAGIOS SERVER Supervisor Checks
-{% if hostgroup_defs['supervisors'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     SUPERVISOR::Percent Supervisors live
-        servicegroups           STORM
-        check_command           check_aggregate!"SUPERVISOR::Supervisors process"!10%!30%
-        _host_component         SUPERVISOR
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          supervisors
-        use                     hadoop-service
-        service_description     SUPERVISOR::Supervisors process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ supervisor_port }}!-w 1 -c 1
-        _host_component         SUPERVISOR
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{%  endif %}
-
-{% if hostgroup_defs['namenode'] and hostgroup_defs['namenode'] != None %}
-# HDFS Checks
-{%  for namenode_hostname in namenode_host %}
-{# TODO: check if we can get rid of str, lower #}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode edit logs directory status on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_name_dir_status!{{ namenode_port }}!{{ str(hdfs_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         NAMENODE
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if check_cpu_on %}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode host CPU utilization on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_cpu!{{ namenode_port }}!200%!250%!{{ str(hdfs_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         NAMENODE
-        normal_check_interval   5
-        retry_check_interval    2
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode Web UI on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_webui!namenode!{{ namenode_port }}
-        _host_component         NAMENODE
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode process on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{nn_ha_host_port_map[namenode_hostname]}}!-w 1 -c 1
-        _host_component         NAMENODE
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     HDFS::NameNode RPC latency on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_rpcq_latency!NameNode!{{ namenode_port }}!3000!5000!{{ str(hdfs_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         NAMENODE
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      5
-}
-
-{%  endfor  %}
-
-define service {
-        host_name               {{namenode_host[0]}}
-        use                     hadoop-service
-        service_description     NAMENODE::Last checkpoint time
-        servicegroups           HDFS
-        check_command           check_checkpoint_time!{{ nn_hosts_string }}!{{ namenode_port }}!200!200!{{ dfs_namenode_checkpoint_period }}!{{dfs_namenode_checkpoint_txns}}!{{str(hdfs_ssl_enabled).lower()}}
-        _host_component         NAMENODE
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Blocks health
-        servicegroups           HDFS
-        check_command           check_hdfs_blocks!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!{{ nn_metrics_property }}!{{ str(hdfs_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         DATANODE
-        normal_check_interval   2
-        retry_check_interval    1
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::HDFS capacity utilization
-        servicegroups           HDFS
-        check_command           check_hdfs_capacity!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!80%!90%!{{ str(hdfs_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         DATANODE
-        normal_check_interval   2
-        retry_check_interval    1
-        max_check_attempts      1
-}
-
-{% endif %}
-
-{% if hostgroup_defs['resourcemanager'] %}
-# YARN::RESOURCEMANAGER Checks
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager Web UI
-        servicegroups           YARN
-        check_command           check_webui_ha!resourcemanager!{{ rm_hosts_in_str }}!{{ rm_port }}
-        _host_component         RESOURCEMANAGER
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if check_cpu_on %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager CPU utilization
-        servicegroups           YARN
-        check_command           check_cpu_ha!{{ rm_hosts_in_str }}!{{ rm_port }}!200%!250%!{{ str(yarn_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         RESOURCEMANAGER
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager RPC latency
-        servicegroups           YARN
-        check_command           check_rpcq_latency_ha!{{ rm_hosts_in_str }}!ResourceManager!{{ rm_port }}!3000!5000!{{ str(yarn_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         RESOURCEMANAGER
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-{%  for rm_host in _rm_host  %}
-define service {
-        host_name               {{ rm_host }}
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager process on {{ rm_host }}
-        servicegroups           YARN
-        check_command           check_tcp_wrapper!{{ rm_port }}!-w 1 -c 1
-        _host_component         RESOURCEMANAGER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endfor %}
-{%  endif %}
-
-{% if hostgroup_defs['nodemanagers'] %}
-# YARN::NODEMANAGER Checks
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager process
-        servicegroups           YARN
-        check_command           check_tcp_wrapper!{{ nm_port }}!-w 1 -c 1
-        _host_component         NODEMANAGER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager health
-        servicegroups           YARN
-        check_command           check_nodemanager_health!{{ nm_port }}!{{ str(security_enabled).lower() }}!{{ str(yarn_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        _host_component         NODEMANAGER
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     NODEMANAGER::Percent NodeManagers live
-        servicegroups           YARN
-        check_command           check_aggregate!"NODEMANAGER::NodeManager process"!10%!30%
-        _host_component         NODEMANAGER
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{%  endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-# MAPREDUCE::JOBHISTORY Checks
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer Web UI
-        servicegroups           MAPREDUCE2
-        check_command           check_webui!historyserver2!{{ hs_port }}
-        _host_component         HISTORYSERVER
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if check_cpu_on %}
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer CPU utilization
-        servicegroups           MAPREDUCE2
-        check_command           check_cpu!{{ hs_port }}!200%!250%!{{ str(mapreduce_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         HISTORYSERVER
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{%  endif %}
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer RPC latency
-        servicegroups           MAPREDUCE2
-        check_command           check_rpcq_latency!JobHistoryServer!{{ hs_port }}!3000!5000!{{ str(mapreduce_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         HISTORYSERVER
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer process
-        servicegroups           MAPREDUCE2
-        check_command           check_tcp_wrapper!{{ hs_port }}!-w 1 -c 1
-        _host_component         HISTORYSERVER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{%  endif %}
-
-{% if hostgroup_defs['journalnodes'] %}
-# Journalnode checks
-define service {
-        hostgroup_name          journalnodes
-        use                     hadoop-service
-        service_description     JOURNALNODE::JournalNode process
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{ journalnode_port }}!-w 1 -c 1
-        _host_component         JOURNALNODE
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if dfs_ha_enabled %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent JournalNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"JOURNALNODE::JournalNode process"!33%!50%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-{% if hostgroup_defs['slaves'] and hostgroup_defs['namenode'] != None %}
-# HDFS::DATANODE Checks
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode process
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{datanode_port}}!-w 1 -c 1
-        _host_component         DATANODE
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode space
-        servicegroups           HDFS
-        check_command           check_datanode_storage!{{ datanode_port }}!90%!90%!{{ str(hdfs_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         DATANODE
-        normal_check_interval   2 
-        retry_check_interval    1
-        max_check_attempts      2
-}
-
-{% endif %}
-
-{% if hostgroup_defs['zookeeper-servers'] %}
-# ZOOKEEPER Checks
-define service {
-        hostgroup_name          zookeeper-servers
-        use                     hadoop-service
-        service_description     ZOOKEEPER::ZooKeeper Server process
-        servicegroups           ZOOKEEPER
-        check_command           check_tcp_wrapper!{{ clientPort }}!-w 1 -c 1
-        _host_component         ZOOKEEPER_SERVER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] and hostgroup_defs['region-servers'] != None %}
-# HBASE::REGIONSERVER Checks
-define service {
-        hostgroup_name          region-servers
-        use                     hadoop-service
-        service_description     REGIONSERVER::RegionServer process
-        servicegroups           HBASE
-        check_command           check_tcp_wrapper!{{ hbase_rs_port }}!-w 1 -c 1
-        _host_component         HBASE_REGIONSERVER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if hostgroup_defs['hbasemasters'] %}
-{% if check_cpu_on %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master CPU utilization
-        servicegroups           HBASE
-        check_command           check_cpu_ha!{{ hbase_master_hosts_in_str }}!{{ hbase_master_port }}!200%!250%!false!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         HBASE_MASTER
-        normal_check_interval   5
-        retry_check_interval    2
-        max_check_attempts      5
-}
-{%  endif %}
-{%  endif %}
-
-{%  for hbasemaster in hbase_master_hosts  %}
-define service {
-        host_name               {{ hbasemaster }}
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master process on {{ hbasemaster }}
-        servicegroups           HBASE
-        check_command           check_tcp_wrapper!{{ hbase_master_rpc_port }}!-w 1 -c 1
-        _host_component         HBASE_MASTER
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endfor %}
-{% endif %}
-
-{% if hostgroup_defs['hiveserver'] %}
-# HIVE Metastore check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-METASTORE::Hive Metastore process
-        servicegroups           HIVE
-        check_command           check_tcp_wrapper!{{ hive_metastore_port }}!-w 1 -c 1
-        _host_component         HIVE_METASTORE
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-# HIVE Server check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-SERVER::HiveServer2 process
-        servicegroups           HIVE
-        check_command           check_tcp_wrapper_sasl!{{ hive_server_port }}!{{ '--security-enabled' if security_enabled else '' }}!-w 1 -c 1
-        _host_component         HIVE_SERVER
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-# Oozie check
-define service {
-        hostgroup_name          oozie-server
-        use                     hadoop-service
-        service_description     OOZIE::Oozie Server status
-        servicegroups           OOZIE
-        {% if security_enabled %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!false
-        {% endif %}
-        _host_component         OOZIE_SERVER
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-# WEBHCAT check
-define service {
-        hostgroup_name          webhcat-server
-        use                     hadoop-service
-        service_description     WEBHCAT::WebHCat Server status
-        servicegroups           HIVE
-        {% if security_enabled %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!false
-        {% endif %}
-        _host_component         WEBHCAT_SERVER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hue-server'] %}
-define service {
-        hostgroup_name          hue-server
-        use                     hadoop-service
-        service_description     HUE::Hue Server status
-        servicegroups           HUE
-        check_command           check_hue_status
-        _host_component         HUE
-        normal_check_interval   100
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-# FALCON Checks
-{% if hostgroup_defs['falcon-server'] %}
-define service {
-        hostgroup_name          falcon-server
-        service_description     FALCON::Falcon Server process
-        servicegroups           FALCON
-        check_command           check_tcp_wrapper!{{ falcon_port }}!-w 1 -c 1
-        _host_component         FALCON_SERVER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-define service {
-        hostgroup_name          falcon-server
-        service_description     FALCON::Falcon Server Web UI
-        servicegroups           FALCON
-        check_command           check_webui!falconserver!{{ falcon_port }}
-        _host_component         FALCON_SERVER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['ats-servers'] %}
-define service {
-        hostgroup_name          ats-servers
-        use                     hadoop-service
-        service_description     APP_TIMELINE_SERVER::App Timeline Server process
-        servicegroups           YARN
-        check_command           check_tcp_wrapper!{{ ahs_port }}!-w 1 -c 1
-        _host_component         APP_TIMELINE_SERVER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['flume-servers'] %}
-# FLUME Checks
-define service {
-        hostgroup_name          flume-servers
-        use                     hadoop-service
-        service_description     FLUME::Flume Agent process
-        servicegroups           FLUME
-        check_command           check_ambari!/var/nagios/ambari.json!flume_agent
-        _host_component         FLUME_HANDLER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['knox-gateway'] %}
-# KNOX Checks
-define service {
-        hostgroup_name          knox-gateway
-        use                     hadoop-service
-        service_description     KNOX::Knox Gateway process
-        servicegroups           KNOX
-        check_command           check_tcp_wrapper!{{ knox_gateway_port }}!-w 1 -c 1
-        _host_component         KNOX_GATEWAY
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['kafka-broker'] %}
-# KAFKA Checks
-define service {
-        hostgroup_name          kafka-broker
-        use                     hadoop-service
-        service_description     KAFKA::Kafka Broker process
-        servicegroups           KAFKA
-        check_command           check_tcp_wrapper!{{ kafka_broker_port }}!-w 1 -c 1
-        _host_component         KAFKA_BROKER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-
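
Many of the nagios-server checks above are aggregates: a command such as check_aggregate!"DATANODE::DataNode process"!10%!30% compares the percentage of member hosts failing the named service against warning and critical thresholds. A sketch of that threshold arithmetic only (the real plugin reads the member states out of /var/nagios/status.dat, and treating the bounds as inclusive is an assumption here):

    def aggregate_state(num_failed, num_total, warn_pct=10.0, crit_pct=30.0):
        # Map the failed fraction onto Nagios exit codes, mirroring the
        # warn%/crit% arguments given to check_aggregate above.
        failed_pct = 100.0 * num_failed / num_total
        if failed_pct >= crit_pct:
            return 2  # CRITICAL
        if failed_pct >= warn_pct:
            return 1  # WARNING
        return 0      # OK

    # 2 of 12 DataNodes down -> ~16.7% failed -> WARNING at 10%/30%
    assert aggregate_state(2, 12) == 1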


[11/17] ambari git commit: AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.cfg.j2
deleted file mode 100644
index 8fd948d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.cfg.j2
+++ /dev/null
@@ -1,1368 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-##############################################################################
-#
-# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
-#
-# Read the documentation for more information on this configuration
-# file.  I've provided some comments here, but things may not be so
-# clear without further explanation.
-#
-# Last Modified: 12-14-2008
-#
-##############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-# LOG FILE
-# This is the main log file where service and host events are logged
-# for historical purposes.  This should be the first option specified 
-# in the config file!!!
-
-log_file=/var/log/nagios/nagios.log
-
-
-
-# OBJECT CONFIGURATION FILE(S)
-# These are the object configuration files in which you define hosts,
-# host groups, contacts, contact groups, services, etc.
-# You can split your object definitions across several config files
-# if you wish (as shown below), or keep them all in a single config file.
-
-# You can specify individual object config files as shown below:
-cfg_file=/etc/nagios/objects/commands.cfg
-cfg_file=/etc/nagios/objects/contacts.cfg
-cfg_file=/etc/nagios/objects/timeperiods.cfg
-cfg_file=/etc/nagios/objects/templates.cfg
-
-# Definitions for monitoring the local (Linux) host
-#cfg_file=/etc/nagios/objects/localhost.cfg
-
-# Definitions for monitoring a Windows machine
-#cfg_file=/etc/nagios/objects/windows.cfg
-
-# Definitions for monitoring a router/switch
-#cfg_file=/etc/nagios/objects/switch.cfg
-
-# Definitions for monitoring a network printer
-#cfg_file=/etc/nagios/objects/printer.cfg
-
-# Definitions for hadoop servers
-cfg_file={{nagios_host_cfg}}
-cfg_file={{nagios_hostgroup_cfg}}
-cfg_file={{nagios_servicegroup_cfg}}
-cfg_file={{nagios_service_cfg}}
-cfg_file={{nagios_command_cfg}}
-
-
-# You can also tell Nagios to process all config files (with a .cfg
-# extension) in a particular directory by using the cfg_dir
-# directive as shown below:
-
-#cfg_dir=/etc/nagios/servers
-#cfg_dir=/etc/nagios/printers
-#cfg_dir=/etc/nagios/switches
-#cfg_dir=/etc/nagios/routers
-
-
-
-
-# OBJECT CACHE FILE
-# This option determines where object definitions are cached when
-# Nagios starts/restarts.  The CGIs read object definitions from 
-# this cache file (rather than looking at the object config files
-# directly) in order to prevent inconsistencies that can occur
-# when the config files are modified after Nagios starts.
-
-object_cache_file=/var/nagios/objects.cache
-
-
-
-# PRE-CACHED OBJECT FILE
-# This option determines the location of the precached object file.
-# If you run Nagios with the -p command line option, it will preprocess
-# your object configuration file(s) and write the cached config to this
-# file.  You can then start Nagios with the -u option to have it read
-# object definitions from this precached file, rather than the standard
-# object configuration files (see the cfg_file and cfg_dir options above).
-# Using a precached object file can reduce the time needed to (re)start
-# the Nagios process if you've got a large and/or complex configuration.
-# Read the documentation section on optimizing Nagios to find out more
-# about how this feature works.
-
-precached_object_file=/var/nagios/objects.precache
-
-
-
-# RESOURCE FILE
-# This is an optional resource file that contains $USERx$ macro
-# definitions. Multiple resource files can be specified by using
-# multiple resource_file definitions.  The CGIs will not attempt to
-# read the contents of resource files, so information that is
-# considered to be sensitive (usernames, passwords, etc) can be
-# defined as macros in this file and restrictive permissions (600)
-# can be placed on this file.
-
-resource_file={{nagios_resource_cfg}}
-
-
-
-# STATUS FILE
-# This is where the current status of all monitored services and
-# hosts is stored.  Its contents are read and processed by the CGIs.
-# The contents of the status file are deleted every time Nagios
-# restarts.
-
-status_file=/var/nagios/status.dat
-
-
-
-# STATUS FILE UPDATE INTERVAL
-# This option determines the frequency (in seconds) that
-# Nagios will periodically dump program, host, and 
-# service status data.
-
-status_update_interval=10
-
-
-
-# NAGIOS USER
-# This determines the effective user that Nagios should run as.  
-# You can either supply a username or a UID.
-
-nagios_user={{nagios_user}}
-
-
-
-# NAGIOS GROUP
-# This determines the effective group that Nagios should run as.  
-# You can either supply a group name or a GID.
-
-nagios_group={{nagios_group}}
-
-
-
-# EXTERNAL COMMAND OPTION
-# This option allows you to specify whether or not Nagios should check
-# for external commands (in the command file defined below).  By default
-# Nagios will *not* check for external commands, just to be on the
-# cautious side.  If you want to be able to use the CGI command interface
-# you will have to enable this.
-# Values: 0 = disable commands, 1 = enable commands
-
-check_external_commands=1
-
-
-
-# EXTERNAL COMMAND CHECK INTERVAL
-# This is the interval at which Nagios should check for external commands.
-# This value works off the interval_length you specify later.  If you leave
-# that at its default value of 60 (seconds), a value of 1 here will cause
-# Nagios to check for external commands every minute.  If you specify a
-# number followed by an "s" (e.g. 15s), this will be interpreted to mean
-# actual seconds rather than a multiple of the interval_length variable.
-# Note: In addition to reading the external command file at regularly 
-# scheduled intervals, Nagios will also check for external commands after
-# event handlers are executed.
-# NOTE: Setting this value to -1 causes Nagios to check the external
-# command file as often as possible.
-
-#command_check_interval=15s
-command_check_interval=-1
-
-
-
-# EXTERNAL COMMAND FILE
-# This is the file that Nagios checks for external command requests.
-# It is also where the command CGI will write commands that are submitted
-# by users, so it must be writeable by the user that the web server
-# is running as (usually 'nobody').  Permissions should be set at the 
-# directory level instead of on the file, as the file is deleted every
-# time its contents are processed.
-
-command_file=/var/nagios/rw/nagios.cmd
-
-
-
-# EXTERNAL COMMAND BUFFER SLOTS
-# This setting is used to tweak the number of items or "slots" that
-# the Nagios daemon should allocate to the buffer that holds incoming 
-# external commands before they are processed.  As external commands 
-# are processed by the daemon, they are removed from the buffer.  
-
-external_command_buffer_slots=4096
-
-
-
-# LOCK FILE
-# This is the lockfile that Nagios will use to store its PID number
-# in when it is running in daemon mode.
-
-lock_file={{nagios_pid_file}}
-
-
-
-# TEMP FILE
-# This is a temporary file that is used as scratch space when Nagios
-# updates the status log, cleans the comment file, etc.  This file
-# is created, used, and deleted throughout the time that Nagios is
-# running.
-
-temp_file=/var/nagios/nagios.tmp
-
-
-
-# TEMP PATH
-# This is path where Nagios can create temp files for service and
-# host check results, etc.
-
-temp_path=/tmp
-
-
-
-# EVENT BROKER OPTIONS
-# Controls what (if any) data gets sent to the event broker.
-# Values:  0      = Broker nothing
-#         -1      = Broker everything
-#         <other> = See documentation
-
-event_broker_options=-1
-
-
-
-# EVENT BROKER MODULE(S)
-# This directive is used to specify an event broker module that should
-# by loaded by Nagios at startup.  Use multiple directives if you want
-# to load more than one module.  Arguments that should be passed to
-# the module at startup are separated from the module path by a space.
-#
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-#
-# Do NOT overwrite modules while they are being used by Nagios or Nagios
-# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
-# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
-#
-# The correct/safe way of updating a module is by using one of these methods:
-#    1. Shutdown Nagios, replace the module file, restart Nagios
-#    2. Delete the original module file, move the new module file into place, restart Nagios
-#
-# Example:
-#
-#   broker_module=<modulepath> [moduleargs]
-
-#broker_module=/somewhere/module1.o
-#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
-
-
-
-# LOG ROTATION METHOD
-# This is the log rotation method that Nagios should use to rotate
-# the main log file. Values are as follows..
-#	n	= None - don't rotate the log
-#	h	= Hourly rotation (top of the hour)
-#	d	= Daily rotation (midnight every day)
-#	w	= Weekly rotation (midnight on Saturday evening)
-#	m	= Monthly rotation (midnight last day of month)
-
-log_rotation_method=d
-
-
-
-# LOG ARCHIVE PATH
-# This is the directory where archived (rotated) log files should be 
-# placed (assuming you've chosen to do log rotation).
-
-log_archive_path=/var/log/nagios/archives
-
-
-
-# LOGGING OPTIONS
-# If you want messages logged to the syslog facility, as well as the
-# Nagios log file set this option to 1.  If not, set it to 0.
-
-use_syslog=1
-
-
-
-# NOTIFICATION LOGGING OPTION
-# If you don't want notifications to be logged, set this value to 0.
-# If notifications should be logged, set the value to 1.
-
-log_notifications=1
-
-
-
-# SERVICE RETRY LOGGING OPTION
-# If you don't want service check retries to be logged, set this value
-# to 0.  If retries should be logged, set the value to 1.
-
-log_service_retries=1
-
-
-
-# HOST RETRY LOGGING OPTION
-# If you don't want host check retries to be logged, set this value to
-# 0.  If retries should be logged, set the value to 1.
-
-log_host_retries=1
-
-
-
-# EVENT HANDLER LOGGING OPTION
-# If you don't want host and service event handlers to be logged, set
-# this value to 0.  If event handlers should be logged, set the value
-# to 1.
-
-log_event_handlers=1
-
-
-
-# INITIAL STATES LOGGING OPTION
-# If you want Nagios to log all initial host and service states to
-# the main log file (the first time the service or host is checked)
-# you can enable this option by setting this value to 1.  If you
-# are not using an external application that does long term state
-# statistics reporting, you do not need to enable this option.  In
-# this case, set the value to 0.
-
-log_initial_states=0
-
-
-
-# EXTERNAL COMMANDS LOGGING OPTION
-# If you don't want Nagios to log external commands, set this value
-# to 0.  If external commands should be logged, set this value to 1.
-# Note: This option does not include logging of passive service
-# checks - see the option below for controlling whether or not
-# passive checks are logged.
-
-log_external_commands=1
-
-
-
-# PASSIVE CHECKS LOGGING OPTION
-# If you don't want Nagios to log passive host and service checks, set
-# this value to 0.  If passive checks should be logged, set
-# this value to 1.
-
-log_passive_checks=1
-
-
-
-# GLOBAL HOST AND SERVICE EVENT HANDLERS
-# These options allow you to specify a host and service event handler
-# command that is to be run for every host or service state change.
-# The global event handler is executed immediately prior to the event
-# handler that you have optionally specified in each host or
-# service definition. The command argument is the short name of a
-# command definition that you define in your host configuration file.
-# Read the HTML docs for more information.
-
-#global_host_event_handler=somecommand
-#global_service_event_handler=somecommand
-
-
-
-# SERVICE INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" service checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all service checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!  This is not a
-# good thing for production, but is useful when testing the
-# parallelization functionality.
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-service_inter_check_delay_method=s
-
-
-
-# MAXIMUM SERVICE CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all services should
-# be completed.  Default is 30 minutes.
-
-max_service_check_spread=30
-
-
-
-# SERVICE CHECK INTERLEAVE FACTOR
-# This variable determines how service checks are interleaved.
-# Interleaving the service checks allows for a more even
-# distribution of service checks and reduced load on remote
-# hosts.  Setting this value to 1 is equivalent to how versions
-# of Nagios previous to 0.0.5 did service checks.  Set this
-# value to s (smart) for automatic calculation of the interleave
-# factor unless you have a specific reason to change it.
-#       s       = Use "smart" interleave factor calculation
-#       x       = Use an interleave factor of x, where x is a
-#                 number greater than or equal to 1.
-
-service_interleave_factor=s
-
-
-
-# HOST INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" host checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all host checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-host_inter_check_delay_method=s
-
-
-
-# MAXIMUM HOST CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all hosts should
-# be completed.  Default is 30 minutes.
-
-max_host_check_spread=30
-
-
-
-# MAXIMUM CONCURRENT SERVICE CHECKS
-# This option allows you to specify the maximum number of 
-# service checks that can be run in parallel at any given time.
-# Specifying a value of 1 for this variable essentially prevents
-# any service checks from being parallelized.  A value of 0
-# will not restrict the number of concurrent checks that are
-# being executed.
-
-max_concurrent_checks=0
-
-
-
-# HOST AND SERVICE CHECK REAPER FREQUENCY
-# This is the frequency (in seconds!) that Nagios will process
-# the results of host and service checks.
-
-check_result_reaper_frequency=10
-
-
-
-
-# MAX CHECK RESULT REAPER TIME
-# This is the max amount of time (in seconds) that  a single
-# check result reaper event will be allowed to run before 
-# returning control back to Nagios so it can perform other
-# duties.
-
-max_check_result_reaper_time=30
-
-
-
-
-# CHECK RESULT PATH
-# This is the directory where Nagios stores the results of host and
-# service checks that have not yet been processed.
-#
-# Note: Make sure that only one instance of Nagios has access
-# to this directory!  
-
-check_result_path=/var/nagios/spool/checkresults
-
-
-
-
-# MAX CHECK RESULT FILE AGE
-# This option determines the maximum age (in seconds) for which check
-# result files are considered to be valid.  Files older than this
-# threshold will be mercilessly deleted without further processing.
-
-max_check_result_file_age=3600
-
-
-
-
-# CACHED HOST CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous host check is considered current.
-# Cached host states (from host checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to the host check logic.
-# Too high of a value for this option may result in inaccurate host
-# states being used by Nagios, while a lower value may result in a
-# performance hit for host checks.  Use a value of 0 to disable host
-# check caching.
-
-cached_host_check_horizon=15
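
For illustration, a minimal Python sketch of the caching rule this horizon
controls (a hypothetical helper, not Nagios source):

    import time

    CACHED_HOST_CHECK_HORIZON = 15  # seconds, matching the setting above

    def usable_cached_host_state(last_check_time, horizon=CACHED_HOST_CHECK_HORIZON):
        # A prior result is "current" only while it is younger than the horizon;
        # a horizon of 0 disables host check caching entirely.
        if horizon == 0:
            return False
        return (time.time() - last_check_time) <= horizon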
-
-
-
-# CACHED SERVICE CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous service check is considered current.
-# Cached service states (from service checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to predictive dependency checks.
-# Use a value of 0 to disable service check caching.
-
-cached_service_check_horizon=15
-
-
-
-# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of hosts when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# host dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_host_dependency_checks=1
-
-
-
-# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of services when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# service dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_service_dependency_checks=1
-
-
-
-# SOFT STATE DEPENDENCIES
-# This option determines whether or not Nagios will use soft state 
-# information when checking host and service dependencies. Normally 
-# Nagios will only use the latest hard host or service state when 
-# checking dependencies. If you want it to use the latest state (regardless
-# of whether it's a soft or hard state type), enable this option.
-# Values:
-#  0 = Don't use soft state dependencies (default) 
-#  1 = Use soft state dependencies 
-
-soft_state_dependencies=0
-
-
-
-# TIME CHANGE ADJUSTMENT THRESHOLDS
-# These options determine when Nagios will react to detected changes
-# in system time (either forward or backwards).
-
-#time_change_threshold=900
-
-
-
-# AUTO-RESCHEDULING OPTION
-# This option determines whether or not Nagios will attempt to
-# automatically reschedule active host and service checks to
-# "smooth" them out over time.  This can help balance the load on
-# the monitoring server.  
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_reschedule_checks=0
-
-
-
-# AUTO-RESCHEDULING INTERVAL
-# This option determines how often (in seconds) Nagios will
-# attempt to automatically reschedule checks.  This option only
-# has an effect if the auto_reschedule_checks option is enabled.
-# Default is 30 seconds.
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_interval=30
-
-
-
-# AUTO-RESCHEDULING WINDOW
-# This option determines the "window" of time (in seconds) that
-# Nagios will look at when automatically rescheduling checks.
-# Only host and service checks that occur in the next X seconds
-# (determined by this variable) will be rescheduled. This option
-# only has an effect if the auto_reschedule_checks option is
-# enabled.  Default is 180 seconds (3 minutes).
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_window=180
-
-
-
-# SLEEP TIME
-# This is the number of seconds to sleep between checking for system
-# events and service checks that need to be run.
-
-sleep_time=0.25
-
-
-
-# TIMEOUT VALUES
-# These options control how much time Nagios will allow various
-# types of commands to execute before killing them off.  Options
-# are available for controlling maximum time allotted for
-# service checks, host checks, event handlers, notifications, the
-# ocsp command, and performance data commands.  All values are in
-# seconds.
-
-service_check_timeout=60
-host_check_timeout=30
-event_handler_timeout=30
-notification_timeout=30
-ocsp_timeout=5
-perfdata_timeout=5
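
As a rough sketch of the enforcement these timeouts describe (an analogy in
Python, not Nagios's actual implementation): run the command and kill it once
the allotted seconds elapse.

    import subprocess

    def run_with_timeout(cmd, timeout_seconds):
        # Returns the completed process, or None if the command had to be
        # killed, which Nagios would treat as a check/handler failure.
        try:
            return subprocess.run(cmd, capture_output=True, timeout=timeout_seconds)
        except subprocess.TimeoutExpired:
            return None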
-
-
-
-# RETAIN STATE INFORMATION
-# This setting determines whether or not Nagios will save state
-# information for services and hosts before it shuts down.  Upon
-# startup Nagios will reload all saved service and host state
-# information before starting to monitor.  This is useful for 
-# maintaining long-term data on state statistics, etc., but will
-# slow Nagios down a bit when it (re)starts.  Since it's only
-# a one-time penalty, I think it's well worth the additional
-# startup delay.
-
-retain_state_information=1
-
-
-
-# STATE RETENTION FILE
-# This is the file that Nagios should use to store host and
-# service state information before it shuts down.  The state 
-# information in this file is also read immediately prior to
-# starting to monitor the network when Nagios is restarted.
-# This file is used only if the retain_state_information
-# variable is set to 1.
-
-state_retention_file=/var/nagios/retention.dat
-
-
-
-# RETENTION DATA UPDATE INTERVAL
-# This setting determines how often (in minutes) that Nagios
-# will automatically save retention data during normal operation.
-# If you set this value to 0, Nagios will not save retention
-# data at regular intervals, but it will still save retention
-# data before shutting down or restarting.  If you have disabled
-# state retention, this option has no effect.
-
-retention_update_interval=60
-
-
-
-# USE RETAINED PROGRAM STATE
-# This setting determines whether or not Nagios will set 
-# program status variables based on the values saved in the
-# retention file.  If you want to use retained program status
-# information, set this value to 1.  If not, set this value
-# to 0.
-
-use_retained_program_state=1
-
-
-
-# USE RETAINED SCHEDULING INFO
-# This setting determines whether or not Nagios will retain
-# the scheduling info (next check time) for hosts and services
-# based on the values saved in the retention file.  If you
-# want to use retained scheduling info, set this
-# value to 1.  If not, set this value to 0.
-
-use_retained_scheduling_info=1
-
-
-
-# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
-# The following variables are used to specify specific host and
-# service attributes that should *not* be retained by Nagios during
-# program restarts.
-#
-# The values of the masks are bitwise ORs (sums) of values specified
-# by the "MODATTR_" definitions found in include/common.h.  
-# For example, if you do not want the current enabled/disabled state
-# of flap detection and event handlers for hosts to be retained, you
-# would use a value of 24 for the host attribute mask...
-# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
-
-# This mask determines what host attributes are not retained
-retained_host_attribute_mask=0
-
-# This mask determines what service attributes are not retained
-retained_service_attribute_mask=0
-
-# These two masks determine what process attributes are not retained.
-# There are two masks, because some process attributes have host and service
-# options.  For example, you can disable active host checks, but leave active
-# service checks enabled.
-retained_process_host_attribute_mask=0
-retained_process_service_attribute_mask=0
-
-# These two masks determine what contact attributes are not retained.
-# There are two masks, because some contact attributes have host and
-# service options.  For example, you can disable host notifications for
-# a contact, but leave service notifications enabled for them.
-retained_contact_host_attribute_mask=0
-retained_contact_service_attribute_mask=0
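
A worked example of the mask arithmetic described above, using the two
MODATTR_ values quoted in the comment (the constants mirror the ones named
from include/common.h; the rest is illustrative):

    MODATTR_EVENT_HANDLER_ENABLED = 8
    MODATTR_FLAP_DETECTION_ENABLED = 16

    # Do not retain event handler or flap detection state for hosts:
    retained_host_attribute_mask = (MODATTR_EVENT_HANDLER_ENABLED
                                    | MODATTR_FLAP_DETECTION_ENABLED)
    assert retained_host_attribute_mask == 24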
-
-
-
-# INTERVAL LENGTH
-# This is the seconds per unit interval as used in the
-# host/contact/service configuration files.  Setting this to 60 means
-# that each interval is one minute long (60 seconds).  Other settings
-# have not been tested much, so your mileage is likely to vary...
-
-interval_length=60
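
A quick worked example of the unit arithmetic: with interval_length=60, an
object definition's check_interval of 5 means one check every 300 seconds.

    interval_length = 60   # seconds per unit interval, as configured above
    check_interval = 5     # as written in a host/service definition
    seconds_between_checks = check_interval * interval_length  # 5 * 60 = 300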
-
-
-
-# CHECK FOR UPDATES
-# This option determines whether Nagios will automatically check to
-# see if new updates (releases) are available.  It is recommended that you
-# enable this option to ensure that you stay on top of the latest critical
-# patches to Nagios.  Nagios is critical to you - make sure you keep it in
-# good shape.  Nagios will check once a day for new updates. Data collected
-# by Nagios Enterprises from the update check is processed in accordance 
-# with our privacy policy - see http://api.nagios.org for details.
-
-check_for_updates=1
-
-
-
-# BARE UPDATE CHECK
-# This option determines what data Nagios will send to api.nagios.org when
-# it checks for updates.  By default, Nagios will send information on the 
-# current version of Nagios you have installed, as well as an indicator as
-# to whether this was a new installation or not.  Nagios Enterprises uses
-# this data to determine the number of users running specific versions of
-# Nagios.  Enable this option if you do not want this information to be sent.
-
-bare_update_check=0
-
-
-
-# AGGRESSIVE HOST CHECKING OPTION
-# If you don't want to turn on aggressive host checking features, set
-# this value to 0 (the default).  Otherwise set this value to 1 to
-# enable the aggressive check option.  Read the docs for more info
-# on what aggressive host checking is or check out the source code in
-# base/checks.c
-
-use_aggressive_host_checking=0
-
-
-
-# SERVICE CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# service checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of service checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_service_checks=1
-
-
-
-# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# service check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_service_checks=1
-
-
-
-# HOST CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# host checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of host checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_host_checks=1
-
-
-
-# PASSIVE HOST CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# host check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_host_checks=1
-
-
-
-# NOTIFICATIONS OPTION
-# This determines whether or not Nagios will send out any host or
-# service notifications when it is initially (re)started.
-# Values: 1 = enable notifications, 0 = disable notifications
-
-enable_notifications=1
-
-
-
-# EVENT HANDLER USE OPTION
-# This determines whether or not Nagios will run any host or
-# service event handlers when it is initially (re)started.  Unless
-# you're implementing redundant hosts, leave this option enabled.
-# Values: 1 = enable event handlers, 0 = disable event handlers
-
-enable_event_handlers=1
-
-
-
-# PROCESS PERFORMANCE DATA OPTION
-# This determines whether or not Nagios will process performance
-# data returned from service and host checks.  If this option is
-# enabled, host performance data will be processed using the
-# host_perfdata_command (defined below) and service performance
-# data will be processed using the service_perfdata_command (also
-# defined below).  Read the HTML docs for more information on
-# performance data.
-# Values: 1 = process performance data, 0 = do not process performance data
-
-process_performance_data=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
-# These commands are run after every host and service check is
-# performed.  These commands are executed only if the
-# process_performance_data option (above) is set to 1.  The command
-# argument is the short name of a command definition that you 
-# define in your host configuration file.  Read the HTML docs for
-# more information on performance data.
-
-#host_perfdata_command=process-host-perfdata
-#service_perfdata_command=process-service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILES
-# These files are used to store host and service performance data.
-# Performance data is only written to these files if the
-# process_performance_data option (above) is set to 1.
-
-#host_perfdata_file=/tmp/host-perfdata
-#service_perfdata_file=/tmp/service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
-# These options determine what data is written (and how) to the
-# performance data files.  The templates may contain macros, special
-# characters (\t for tab, \r for carriage return, \n for newline)
-# and plain text.  A newline is automatically added after each write
-# to the performance data file.  Some examples of what you can do are
-# shown below.
-
-#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
-#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE MODES
-# This option determines whether or not the host and service
-# performance data files are opened in write ("w") or append ("a")
-# mode. If you want to use named pipes, you should use the special
-# pipe ("p") mode which avoid blocking at startup, otherwise you will
-# likely want the defult append ("a") mode.
-
-#host_perfdata_file_mode=a
-#service_perfdata_file_mode=a
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
-# These options determine how often (in seconds) the host and service
-# performance data files are processed using the commands defined
-# below.  A value of 0 indicates the files should not be periodically
-# processed.
-
-#host_perfdata_file_processing_interval=0
-#service_perfdata_file_processing_interval=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
-# These commands are used to periodically process the host and
-# service performance data files.  The interval at which the
-# processing occurs is determined by the options above.
-
-#host_perfdata_file_processing_command=process-host-perfdata-file
-#service_perfdata_file_processing_command=process-service-perfdata-file
-
-
-
-# OBSESS OVER SERVICE CHECKS OPTION
-# This determines whether or not Nagios will obsess over service
-# checks and run the ocsp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over services, 0 = do not obsess (default)
-
-obsess_over_services=0
-
-
-
-# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
-# This is the command that is run for every service check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_services option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ocsp_command=somecommand
-
-
-
-# OBSESS OVER HOST CHECKS OPTION
-# This determines whether or not Nagios will obsess over host
-# checks and run the ochp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over hosts, 0 = do not obsess (default)
-
-obsess_over_hosts=0
-
-
-
-# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
-# This is the command that is run for every host check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_hosts option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ochp_command=somecommand
-
-
-
-# TRANSLATE PASSIVE HOST CHECKS OPTION
-# This determines whether or not Nagios will translate
-# DOWN/UNREACHABLE passive host check results into their proper
-# state for this instance of Nagios.  This option is useful
-# if you have a distributed or failover monitoring setup.  In
-# these cases your other Nagios servers probably have a different
-# "view" of the network, with regards to the parent/child relationship
-# of hosts.  If a distributed monitoring server thinks a host
-# is DOWN, it may actually be UNREACHABLE from the point of
-# this Nagios instance.  Enabling this option will tell Nagios
-# to translate any DOWN or UNREACHABLE host states it receives
-# passively into the correct state from the view of this server.
-# Values: 1 = perform translation, 0 = do not translate (default)
-
-translate_passive_host_checks=0
-
-
-
-# PASSIVE HOST CHECKS ARE SOFT OPTION
-# This determines whether or not Nagios will treat passive host
-# checks as being HARD or SOFT.  By default, a passive host check
-# result will put a host into a HARD state type.  This can be changed
-# by enabling this option.
-# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
-
-passive_host_checks_are_soft=0
-
-
-
-# ORPHANED HOST/SERVICE CHECK OPTIONS
-# These options determine whether or not Nagios will periodically 
-# check for orphaned host and service checks.  Since service checks are
-# not rescheduled until the results of their previous execution 
-# instance are processed, there exists a possibility that some
-# checks may never get rescheduled.  A similar situation exists for
-# host checks, although the exact scheduling details differ a bit
-# from service checks.  Orphaned checks seem to be a rare
-# problem and should not happen under normal circumstances.
-# If you have problems with service checks never getting
-# rescheduled, make sure you have orphaned service checks enabled.
-# Values: 1 = enable checks, 0 = disable checks
-
-check_for_orphaned_services=1
-check_for_orphaned_hosts=1
-
-
-
-# SERVICE FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of service results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_service_freshness=0
-
-
-
-# SERVICE FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of service check results.  If you have
-# disabled service freshness checking, this option has no effect.
-
-service_freshness_check_interval=60
-
-
-
-# HOST FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of host results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_host_freshness=0
-
-
-
-# HOST FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of host check results.  If you have
-# disabled host freshness checking, this option has no effect.
-
-host_freshness_check_interval=60
-
-
-
-
-# ADDITIONAL FRESHNESS THRESHOLD LATENCY
-# This setting determines the number of seconds that Nagios
-# will add to any host and service freshness thresholds that
-# it calculates (those not explicitly specified by the user).
-
-additional_freshness_latency=15
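
A simplified Python sketch of the padding this option applies when Nagios
derives a freshness threshold itself (the derivation is simplified; only the
added latency matches the setting above):

    ADDITIONAL_FRESHNESS_LATENCY = 15

    def auto_freshness_threshold(check_interval_seconds,
                                 latency=ADDITIONAL_FRESHNESS_LATENCY):
        # Thresholds the user sets explicitly are used as-is; auto-calculated
        # ones get the extra latency added on top.
        return check_interval_seconds + latency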
-
-
-
-
-# FLAP DETECTION OPTION
-# This option determines whether or not Nagios will try
-# and detect hosts and services that are "flapping".  
-# Flapping occurs when a host or service changes between
-# states too frequently.  When Nagios detects that a 
-# host or service is flapping, it will temporarily suppress
-# notifications for that host/service until it stops
-# flapping.  Flap detection is very experimental, so read
-# the HTML documentation before enabling this feature!
-# Values: 1 = enable flap detection
-#         0 = disable flap detection (default)
-
-enable_flap_detection=1
-
-
-
-# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
-# Read the HTML documentation on flap detection for
-# an explanation of what this option does.  This option
-# has no effect if flap detection is disabled.
-
-low_service_flap_threshold=5.0
-high_service_flap_threshold=20.0
-low_host_flap_threshold=5.0
-high_host_flap_threshold=20.0
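
A simplified sketch of how the low/high thresholds gate the flapping state
(the real algorithm computes a weighted percent state change over recent
checks; this only shows the hysteresis between the two thresholds):

    def update_flapping(is_flapping, percent_state_change,
                        low=5.0, high=20.0):
        if not is_flapping and percent_state_change > high:
            return True    # start flapping, notifications suppressed
        if is_flapping and percent_state_change < low:
            return False   # flapping has stopped, notifications resume
        return is_flapping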
-
-
-
-# DATE FORMAT OPTION
-# This option determines how short dates are displayed. Valid options
-# include:
-#	us		(MM-DD-YYYY HH:MM:SS)
-#	euro    	(DD-MM-YYYY HH:MM:SS)
-#	iso8601		(YYYY-MM-DD HH:MM:SS)
-#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
-#
-
-date_format=us
-
-
-
-
-# TIMEZONE OFFSET
-# This option is used to override the default timezone that this
-# instance of Nagios runs in.  If not specified, Nagios will use
-# the system configured timezone.
-#
-# NOTE: In order to display the correct timezone in the CGIs, you
-# will also need to alter the Apache directives for the CGI path 
-# to include your timezone.  Example:
-#
-#   <Directory "/usr/local/nagios/sbin/">
-#      SetEnv TZ "Australia/Brisbane"
-#      ...
-#   </Directory>
-
-#use_timezone=US/Mountain
-#use_timezone=Australia/Brisbane
-
-
-
-
-# P1.PL FILE LOCATION
-# This value determines where the p1.pl perl script (used by the
-# embedded Perl interpreter) is located.  If you didn't compile
-# Nagios with embedded Perl support, this option has no effect.
-
-p1_file = {{nagios_p1_pl}}
-
-
-
-# EMBEDDED PERL INTERPRETER OPTION
-# This option determines whether or not the embedded Perl interpreter
-# will be enabled during runtime.  This option has no effect if Nagios
-# has not been compiled with support for embedded Perl.
-# Values: 0 = disable interpreter, 1 = enable interpreter
-
-enable_embedded_perl=1
-
-
-
-# EMBEDDED PERL USAGE OPTION
-# This option determines whether or not Nagios will process Perl plugins
-# and scripts with the embedded Perl interpreter if the plugins/scripts
-# do not explicitly indicate whether or not it is okay to do so. Read
-# the HTML documentation on the embedded Perl interpreter for more 
-# information on how this option works.
-
-use_embedded_perl_implicitly=1
-
-
-
-# ILLEGAL OBJECT NAME CHARACTERS
-# This option allows you to specify illegal characters that cannot
-# be used in host names, service descriptions, or names of other
-# object types.
-
-illegal_object_name_chars=`~!$%^&*|'"<>?,()=
-
-
-
-# ILLEGAL MACRO OUTPUT CHARACTERS
-# This option allows you to specify illegal characters that are
-# stripped from macros before being used in notifications, event
-# handlers, etc.  This DOES NOT affect macros used in service or
-# host check commands.
-# The following macros are stripped of the characters you specify:
-#	$HOSTOUTPUT$
-#	$HOSTPERFDATA$
-#	$HOSTACKAUTHOR$
-#	$HOSTACKCOMMENT$
-#	$SERVICEOUTPUT$
-#	$SERVICEPERFDATA$
-#	$SERVICEACKAUTHOR$
-#	$SERVICEACKCOMMENT$
-
-illegal_macro_output_chars=`~$&|'"<>
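
A minimal sketch of the stripping behavior described above (illustrative
Python, not Nagios source):

    ILLEGAL_MACRO_OUTPUT_CHARS = "`~$&|'\"<>"

    def sanitize_macro(value, illegal=ILLEGAL_MACRO_OUTPUT_CHARS):
        return ''.join(ch for ch in value if ch not in illegal)

    # sanitize_macro('OK: load <0.5 | uptime=3d') == 'OK: load 0.5  uptime=3d'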
-
-
-
-# REGULAR EXPRESSION MATCHING
-# This option controls whether or not regular expression matching
-# takes place in the object config files.  Regular expression
-# matching is used to match host, hostgroup, service, and service
-# group names/descriptions in some fields of various object types.
-# Values: 1 = enable regexp matching, 0 = disable regexp matching
-
-use_regexp_matching=0
-
-
-
-# "TRUE" REGULAR EXPRESSION MATCHING
-# This option controls whether or not "true" regular expression 
-# matching takes place in the object config files.  This option
-# only has an effect if regular expression matching is enabled
-# (see above).  If this option is DISABLED, regular expression
-# matching only occurs if a string contains wildcard characters
-# (* and ?).  If the option is ENABLED, regexp matching occurs
-# all the time (which can be annoying).
-# Values: 1 = enable true matching, 0 = disable true matching
-
-use_true_regexp_matching=0
-
-
-
-# ADMINISTRATOR EMAIL/PAGER ADDRESSES
-# The email and pager address of a global administrator (likely you).
-# Nagios never uses these values itself, but you can access them by
-# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
-# commands.
-
-admin_email=nagios@localhost
-admin_pager=pagenagios@localhost
-
-
-
-# DAEMON CORE DUMP OPTION
-# This option determines whether or not Nagios is allowed to create
-# a core dump when it runs as a daemon.  Note that it is generally
-# considered bad form to allow this, but it may be useful for
-# debugging purposes.  Enabling this option doesn't guarantee that
-# a core file will be produced, but that's just life...
-# Values: 1 - Allow core dumps
-#         0 - Do not allow core dumps (default)
-
-daemon_dumps_core=0
-
-
-
-# LARGE INSTALLATION TWEAKS OPTION
-# This option determines whether or not Nagios will take some shortcuts
-# which can save on memory and CPU usage in large Nagios installations.
-# Read the documentation for more information on the benefits/tradeoffs
-# of enabling this option.
-# Values: 1 - Enabled tweaks
-#         0 - Disable tweaks (default)
-
-use_large_installation_tweaks=1
-
-
-
-# ENABLE ENVIRONMENT MACROS
-# This option determines whether or not Nagios will make all standard
-# macros available as environment variables when host/service checks
-# and system commands (event handlers, notifications, etc.) are
-# executed.  Enabling this option can cause performance issues in 
-# large installations, as it will consume a bit more memory and (more
-# importantly) consume more CPU.
-# Values: 1 - Enable environment variable macros (default)
-#         0 - Disable environment variable macros
-
-# NAGIOS_* macros are required for Ambari Maintenance Mode (mm_wrapper.py)
-enable_environment_macros=1
-
-
-
-# CHILD PROCESS MEMORY OPTION
-# This option determines whether or not Nagios will free memory in
-# child processes (processes used to execute system commands and host/
-# service checks).  If you specify a value here, it will override
-# program defaults.
-# Value: 1 - Free memory in child processes
-#        0 - Do not free memory in child processes
-
-#free_child_process_memory=1
-
-
-
-# CHILD PROCESS FORKING BEHAVIOR
-# This option determines how Nagios will fork child processes
-# (used to execute system commands and host/service checks).  Normally
-# child processes are fork()ed twice, which provides a very high level
-# of isolation from problems.  Fork()ing once is probably enough and will
-# save a great deal on CPU usage (in large installs), so you might
-# want to consider using this.  If you specify a value here, it will
-# override program defaults.
-# Value: 1 - Child processes fork() twice
-#        0 - Child processes fork() just once
-
-#child_processes_fork_twice=1
-
-
-
-# DEBUG LEVEL
-# This option determines how much (if any) debugging information will
-# be written to the debug file.  OR values together to log multiple
-# types of information.
-# Values:
-#          -1 = Everything
-#           0 = Nothing
-#           1 = Functions
-#           2 = Configuration
-#           4 = Process information
-#           8 = Scheduled events
-#          16 = Host/service checks
-#          32 = Notifications
-#          64 = Event broker
-#         128 = External commands
-#         256 = Commands
-#         512 = Scheduled downtime
-#        1024 = Comments
-#        2048 = Macros
-
-debug_level=0
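
A worked example of OR-ing the categories above (values taken from the list;
the variable name matches the directive):

    HOST_SERVICE_CHECKS = 16
    NOTIFICATIONS = 32

    # Log host/service checks plus notifications:
    debug_level = HOST_SERVICE_CHECKS | NOTIFICATIONS  # 16 | 32 == 48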
-
-
-
-# DEBUG VERBOSITY
-# This option determines how verbose the debug log output will be.
-# Values: 0 = Brief output
-#         1 = More detailed
-#         2 = Very detailed
-
-debug_verbosity=1
-
-
-
-# DEBUG FILE
-# This option determines where Nagios should write debugging information.
-
-debug_file=/var/log/nagios/nagios.debug
-
-
-
-# MAX DEBUG FILE SIZE
-# This option determines the maximum size (in bytes) of the debug file.  If
-# the file grows larger than this size, it will be renamed with a .old
-# extension.  If a file already exists with a .old extension it will
-# automatically be deleted.  This helps ensure your disk space usage doesn't
-# get out of control when debugging Nagios.
-
-max_debug_file_size=1000000
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.conf.j2
deleted file mode 100644
index 7db06cf..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.conf.j2
+++ /dev/null
@@ -1,81 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# SAMPLE CONFIG SNIPPETS FOR APACHE WEB SERVER
-# Last Modified: 11-26-2005
-#
-# This file contains examples of entries that need
-# to be incorporated into your Apache web server
-# configuration file.  Customize the paths, etc. as
-# needed to fit your system.
-#
-
-ScriptAlias /nagios/cgi-bin "/usr/lib/nagios/cgi"
-
-<Directory "/usr/lib/nagios/cgi">
-#  SSLRequireSSL
-   Options ExecCGI
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile /etc/nagios/htpasswd.users
-   Require valid-user
-</Directory>
-
-Alias /nagios "/usr/share/nagios"
-
-<Directory "/usr/share/nagios">
-#  SSLRequireSSL
-   Options None
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile /etc/nagios/htpasswd.users
-   Require valid-user
-</Directory>
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.j2
deleted file mode 100644
index fd270c8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.j2
+++ /dev/null
@@ -1,165 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-#!/bin/sh
-# $Id$
-# Nagios	Startup script for the Nagios monitoring daemon
-#
-# chkconfig:	- 85 15
-# description:	Nagios is a service monitoring system
-# processname: nagios
-# config: /etc/nagios/nagios.cfg
-# pidfile: /var/nagios/nagios.pid
-#
-### BEGIN INIT INFO
-# Provides:		nagios
-# Required-Start:	$local_fs $syslog $network
-# Required-Stop:	$local_fs $syslog $network
-# Short-Description:    start and stop Nagios monitoring server
-# Description:		Nagios is a service monitoring system
-### END INIT INFO
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Source function library.
-. /etc/rc.d/init.d/functions
-
-prefix="/usr"
-exec_prefix="/usr"
-exec="/usr/sbin/nagios"
-prog="nagios"
-config="/etc/nagios/nagios.cfg"
-pidfile="{{nagios_pid_file}}"
-user="{{nagios_user}}"
-
-[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
-
-lockfile=/var/lock/subsys/$prog
-
-start() {
-    [ -x $exec ] || exit 5
-    [ -f $config ] || exit 6
-    echo -n $"Starting $prog: "
-    daemon --user=$user $exec -d $config
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && touch $lockfile
-    return $retval
-}
-
-stop() {
-    echo -n $"Stopping $prog: "
-    killproc -d 10 $exec
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && rm -f $lockfile
-    return $retval
-}
-
-
-restart() {
-    stop
-    start
-}
-
-reload() {
-    echo -n $"Reloading $prog: "
-    killproc $exec -HUP
-    RETVAL=$?
-    echo
-}
-
-force_reload() {
-    restart
-}
-
-check_config() {
-        # $nice and $corelimit, if used, are expected to come from /etc/sysconfig/nagios (sourced above)
-        $nice runuser -s /bin/bash - $user -c "$corelimit >/dev/null 2>&1 ; $exec -v $config > /dev/null 2>&1"
-        RETVAL=$?
-        if [ $RETVAL -ne 0 ] ; then
-                echo -n $"Configuration validation failed"
-                failure
-                echo
-                exit 1
-
-        fi
-}
-
-
-case "$1" in
-    start)
-        status $prog && exit 0
-	check_config
-        $1
-        ;;
-    stop)
-        status $prog|| exit 0
-        $1
-        ;;
-    restart)
-	check_config
-        $1
-        ;;
-    reload)
-        status $prog || exit 7
-	check_config
-        $1
-        ;;
-    force-reload)
-	check_config
-        force_reload
-        ;;
-    status)
-        status $prog
-        ;;
-    condrestart|try-restart)
-        status $prog|| exit 0
-	check_config
-        restart
-        ;;
-    configtest)
-        echo -n  $"Checking config for $prog: "
-        check_config && success
-        echo
-	;;
-    *)
-        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
-        exit 2
-esac
-exit $?

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/resource.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/resource.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/resource.cfg.j2
deleted file mode 100644
index 7ea8d61..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/resource.cfg.j2
+++ /dev/null
@@ -1,70 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-###########################################################################
-#
-# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
-#
-# Last Modified: 09-10-2003
-#
-# You can define $USERx$ macros in this file, which can in turn be used
-# in command definitions in your host config file(s).  $USERx$ macros are
-# useful for storing sensitive information such as usernames, passwords,
-# etc.  They are also handy for specifying the path to plugins and
-# event handlers - if you decide to move the plugins or event handlers to
-# a different directory in the future, you can just update one or two
-# $USERx$ macros, instead of modifying a lot of command definitions.
-#
-# The CGIs will not attempt to read the contents of resource files, so
-# you can set restrictive permissions (600 or 660) on them.
-#
-# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
-#
-# Resource files may also be used to store configuration directives for
-# external data sources like MySQL...
-#
-###########################################################################
-
-# Sets $USER1$ to be the path to the plugins
-$USER1$={{plugins_dir}}
-
-# Sets $USER2$ to be the path to event handlers
-#$USER2$={{eventhandlers_dir}}
-
-# Store some usernames and passwords (hidden from the CGIs)
-#$USER3$=someuser
-#$USER4$=somepassword
\ No newline at end of file
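
For illustration, a small Python sketch of the $USERx$ substitution this file
feeds (the plugin path shown is only an example of what {{plugins_dir}} might
render to; the helper is hypothetical):

    USER_MACROS = {'$USER1$': '/usr/lib64/nagios/plugins'}

    def expand(command_line, macros=USER_MACROS):
        # Nagios replaces $USERx$ macros in command_line definitions with the
        # values loaded from resource.cfg before executing the command.
        for macro, value in macros.items():
            command_line = command_line.replace(macro, value)
        return command_line

    # expand('$USER1$/check_ping -H host1 -w 3000,80% -c 5000,100%')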

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/stack_advisor.py
index ca7c1a1..77289f0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/stack_advisor.py
@@ -247,7 +247,7 @@ class HDP132StackAdvisor(DefaultStackAdvisor):
     return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR']
 
   def getNotPreferableOnServerComponents(self):
-    return ['GANGLIA_SERVER', 'NAGIOS_SERVER']
+    return ['GANGLIA_SERVER']
 
   def getCardinalitiesDict(self):
     return {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.3/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/1.3.3/role_command_order.json
index a05324f..372c851 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
@@ -12,12 +10,6 @@
     "HIVE_METASTORE-START": ["MYSQL_SERVER-START"],
     "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
@@ -47,8 +39,7 @@
     "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
     "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
     "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
-    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
-    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
     "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
   },
   "_comment" : "GLUSTERFS-specific dependencies",
@@ -70,8 +61,6 @@
     "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -92,7 +81,6 @@
   "namenode_optional_ha": {
     "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
     "ZKFC-START": ["NAMENODE-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",
@@ -100,4 +88,3 @@
     "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
   }
 }
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/1.3/role_command_order.json
index 5a75a8e..f610d79 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
@@ -12,12 +10,6 @@
     "HIVE_METASTORE-START": ["MYSQL_SERVER-START"],
     "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
@@ -45,8 +37,7 @@
     "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
     "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
     "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
-    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
-    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
     "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
   },
   "_comment" : "GLUSTERFS-specific dependencies",
@@ -68,8 +59,6 @@
     "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -90,7 +79,6 @@
   "namenode_optional_ha": {
     "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
     "ZKFC-START": ["NAMENODE-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",
@@ -98,4 +86,3 @@
     "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
   }
 }
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json
index 82cbd79..ff6846c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "NIMBUS-START" : ["ZOOKEEPER_SERVER-START"],
     "SUPERVISOR-START" : ["NIMBUS-START"],
     "STORM_UI_SERVER-START" : ["NIMBUS-START"],
@@ -18,12 +16,6 @@
     "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "RESOURCEMANAGER-START", "NODEMANAGER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
@@ -57,8 +49,6 @@
     "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -74,12 +64,10 @@
   "_comment" : "Dependencies that are used in HA NameNode cluster",
   "namenode_optional_ha": {
     "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"]
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",
   "resourcemanager_optional_ha" : {
     "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
   }
 }
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
index 67c08ac..e657002 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
@@ -129,7 +129,6 @@ hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
 
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
-nagios_user = config['configurations']['nagios-env']['nagios_user']
 smoke_user =  config['configurations']['cluster-env']['smokeuser']
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
@@ -138,14 +137,12 @@ oozie_user = config['configurations']['oozie-env']["oozie_user"]
 
 user_group = config['configurations']['cluster-env']['user_group']
 
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
 namenode_host = default("/clusterHostInfo/namenode_host", [])
 hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 
 has_namenode = not len(namenode_host) == 0
-has_nagios = not len(hagios_server_hosts) == 0
 has_ganglia_server = not len(ganglia_server_hosts) == 0
 has_tez = 'tez-site' in config['configurations']
 has_hbase_masters = not len(hbase_master_hosts) == 0
@@ -154,7 +151,6 @@ has_oozie_server = not len(oozie_servers) == 0
 hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
 
 proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-nagios_group = config['configurations']['nagios-env']['nagios_group']
 
 ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
 
@@ -176,8 +172,6 @@ if has_oozie_server:
   user_to_groups_dict[oozie_user] = [proxyuser_group]
 
 user_to_gid_dict = collections.defaultdict(lambda:user_group)
-if has_nagios:
-  user_to_gid_dict[nagios_user] = nagios_group
 
 user_list = json.loads(config['hostLevelParams']['user_list'])
 group_list = json.loads(config['hostLevelParams']['group_list'])
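
With the nagios-env entries gone, user_to_gid_dict above no longer carries any explicit
entry: every account now resolves to user_group through the defaultdict factory. A small
illustration of that fallback behavior:

  import collections

  user_group = "hadoop"
  user_to_gid_dict = collections.defaultdict(lambda: user_group)

  # Before this change, has_nagios would have pinned the nagios user to its
  # own group; now any lookup simply yields the cluster-wide default.
  print(user_to_gid_dict["hdfs"])    # hadoop
  print(user_to_gid_dict["oozie"])   # hadoop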

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
index 4290506..0e4f906 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
@@ -32,7 +32,6 @@ stack_is_hdp22_or_further = hdp_stack_version != "" and compare_versions(hdp_sta
 
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
-nagios_user = config['configurations']['nagios-env']['nagios_user']
 smoke_user =  config['configurations']['cluster-env']['smokeuser']
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
@@ -40,7 +39,6 @@ tez_user = config['configurations']['tez-env']["tez_user"]
 
 user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-nagios_group = config['configurations']['nagios-env']['nagios_group']
 
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 
@@ -49,7 +47,6 @@ hostname = config["hostname"]
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 rm_host = default("/clusterHostInfo/rm_host", [])
 slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
 hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
@@ -67,7 +64,6 @@ has_namenode = not len(namenode_host) == 0
 has_hs = not len(hs_host) == 0
 has_resourcemanager = not len(rm_host) == 0
 has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
 has_oozie_server = not len(oozie_servers)  == 0
 has_hcat_server_host = not len(hcat_server_hosts)  == 0
 has_hive_server_host = not len(hive_server_host)  == 0
@@ -117,8 +113,6 @@ if has_tez:
   user_to_groups_dict[tez_user] = [proxyuser_group]
 
 user_to_gid_dict = collections.defaultdict(lambda:user_group)
-if has_nagios:
-  user_to_gid_dict[nagios_user] = nagios_group
 
 user_list = json.loads(config['hostLevelParams']['user_list'])
 group_list = json.loads(config['hostLevelParams']['group_list'])
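
The deleted lines use the stack's default(path, fallback) helper, which walks a
/-separated path into the command JSON and returns the fallback when a key is absent; the
has_* flags are then just non-emptiness tests. A hypothetical stand-in (the name and
semantics mirror the calls above, but this is not resource_management's implementation):

  def default(config, path, fallback):
      """Walk an '/a/b/c' path through nested dicts; return fallback if absent."""
      node = config
      for part in path.strip("/").split("/"):
          if not isinstance(node, dict) or part not in node:
              return fallback
          node = node[part]
      return node

  config = {"clusterHostInfo": {"oozie_server": ["c6401.ambari.apache.org"]}}
  oozie_servers = default(config, "/clusterHostInfo/oozie_server", [])
  has_oozie_server = not len(oozie_servers) == 0   # idiomatically: bool(oozie_servers)
  print(has_oozie_server)  # True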

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index 5bbd36f..97403e8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -59,7 +59,6 @@ hostname = config["hostname"]
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 rm_host = default("/clusterHostInfo/rm_host", [])
 slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
 hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
@@ -73,7 +72,6 @@ ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
 has_namenode = not len(namenode_host) == 0
 has_resourcemanager = not len(rm_host) == 0
 has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
 has_oozie_server = not len(oozie_servers)  == 0
 has_hcat_server_host = not len(hcat_server_hosts)  == 0
 has_hive_server_host = not len(hive_server_host)  == 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/role_command_order.json
index a92fa1d..d40d440 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
@@ -13,12 +11,6 @@
     "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "ZOOKEEPER_SERVER-START", "NODEMANAGER-START", "RESOURCEMANAGER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
@@ -44,8 +36,6 @@
     "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -61,12 +51,10 @@
   "_comment" : "Dependencies that are used in HA NameNode cluster",
   "namenode_optional_ha": {
     "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"]
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",
   "resourcemanager_optional_ha" : {
     "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
   }
 }
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmondLib.sh
index e7ea83f..d06afd8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmondLib.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmondLib.sh
@@ -160,7 +160,6 @@ host {
  *
  * At the very least, every gmond must expose its XML state to 
  * queriers from localhost.
- * Also we use this port for Nagios monitoring
  */
 tcp_accept_channel {
   bind = 0.0.0.0
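
The removed comment referred to Nagios polling this channel; the channel itself stays,
since every gmond still serves its XML state to local queriers. A rough sketch of such a
query, assuming gmond's default TCP port 8649 and that the full XML dump is sent on
connect:

  import socket
  import xml.etree.ElementTree as ET

  def gmond_host_count(host="127.0.0.1", port=8649, timeout=5.0):
      """Read gmond's XML dump over TCP and count reported hosts."""
      with socket.create_connection((host, port), timeout=timeout) as sock:
          chunks = []
          while True:
              data = sock.recv(65536)
              if not data:
                  break
              chunks.append(data)
      root = ET.fromstring(b"".join(chunks))
      return len(root.findall(".//HOST"))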

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
index 6d35468..49ac408 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
@@ -96,7 +96,6 @@ kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/
 hostname = config["hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
 slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
 hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
@@ -117,7 +116,6 @@ has_resourcemanager = not len(rm_host) == 0
 has_histroryserver = not len(hs_host) == 0
 has_hbase_masters = not len(hbase_master_hosts) == 0
 has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
 has_oozie_server = not len(oozie_servers)  == 0
 has_hcat_server_host = not len(hcat_server_hosts)  == 0
 has_hive_server_host = not len(hive_server_host)  == 0
@@ -139,7 +137,6 @@ if has_ganglia_server:
 #users and groups
 yarn_user = config['configurations']['yarn-env']['yarn_user']
 hbase_user = config['configurations']['hbase-env']['hbase_user']
-nagios_user = config['configurations']['nagios-env']['nagios_user']
 oozie_user = config['configurations']['oozie-env']['oozie_user']
 webhcat_user = config['configurations']['hive-env']['hcat_user']
 hcat_user = config['configurations']['hive-env']['hcat_user']
@@ -150,7 +147,6 @@ hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_nam
 
 user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
-nagios_group = config['configurations']['nagios-env']['nagios_group']
 
 #hadoop params
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/nagios-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/nagios-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/nagios-env.xml
deleted file mode 100644
index fad8374..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/nagios-env.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>nagios_user</name>
-    <value>nagios</value>
-    <property-type>USER</property-type>
-    <description>Nagios Username.</description>
-  </property>
-  <property>
-    <name>nagios_group</name>
-    <value>nagios</value>
-    <property-type>GROUP</property-type>
-    <description>Nagios Group.</description>
-  </property>
-  <property>
-    <name>nagios_web_login</name>
-    <value>nagiosadmin</value>
-    <description>Nagios web user.</description>
-  </property>
-  <property require-input = "true">
-    <name>nagios_web_password</name>
-    <value></value>
-    <property-type>PASSWORD</property-type>
-    <description>Nagios Admin Password.</description>
-  </property>
-  <property require-input = "true">
-    <name>nagios_contact</name>
-    <value></value>
-    <description>Hadoop Admin Email.</description>
-  </property>
-
-</configuration>
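
nagios-env.xml followed the standard Ambari configuration schema: <property> entries
with optional <property-type> markers (USER, GROUP, PASSWORD) that tell Ambari how to
treat the value, and require-input for values the user must supply. A sketch of reading
that schema, using the deleted file's shape:

  import xml.etree.ElementTree as ET

  xml_text = """<configuration>
    <property>
      <name>nagios_user</name>
      <value>nagios</value>
      <property-type>USER</property-type>
    </property>
  </configuration>"""

  for prop in ET.fromstring(xml_text).findall("property"):
      name = prop.findtext("name")
      value = prop.findtext("value")
      ptype = prop.findtext("property-type", default="")
      print(name, value, ptype)   # nagios_user nagios USER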


[13/17] ambari git commit: AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu.php
deleted file mode 100755
index 0744e38..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:k:r:t:u:e");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=java.lang:type=OperatingSystem",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-
-    $object = $json_array['beans'][0];
-
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }
-
-    $cpu_load = $object['SystemCpuLoad'];
-
-    if (!isset($object['SystemCpuLoad']) || $cpu_load < 0.0) {
-      echo "WARNING: Data unavailable, SystemCpuLoad is not set\n";
-      exit(1);
-    }
-
-    $cpu_count = $object['AvailableProcessors'];
-
-    $cpu_percent = $cpu_load*100;
-  }
-
-  $out_msg = $cpu_count . " CPU, load " . number_format($cpu_percent, 1, '.', '') . '%';
-
-  if ($cpu_percent > $crit) {
-    echo $out_msg . ' > ' . $crit . "% : CRITICAL\n";
-    exit(2);
-  }
-  if ($cpu_percent > $warn) {
-    echo $out_msg . ' > ' . $warn . "% : WARNING\n";
-    exit(1);
-  }
-
-  echo $out_msg . ' < ' . $warn . "% : OK\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab_path -r principal_name -t kinit_path -u security_enabled -e ssl_enabled\n";
-  }
-?>
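
The core of this plugin: GET /jmx?qry=java.lang:type=OperatingSystem, read SystemCpuLoad
and AvailableProcessors, compare the percentage against warn/crit, and exit with the
Nagios convention (0 OK, 1 WARNING, 2 CRITICAL, 3 UNKNOWN). A minimal unsecured sketch of
the same check (no SPNEGO/kinit handling, plain HTTP only; the host name is a placeholder):

  import json
  import sys
  import urllib.request

  def check_cpu(host, port, warn, crit):
      url = "http://%s:%s/jmx?qry=java.lang:type=OperatingSystem" % (host, port)
      try:
          with urllib.request.urlopen(url, timeout=10) as resp:
              bean = json.load(resp)["beans"][0]
      except Exception as e:
          print("CRITICAL: Data inaccessible: %s" % e)
          return 2
      load = bean.get("SystemCpuLoad", -1.0)
      if load < 0.0:
          print("WARNING: Data unavailable, SystemCpuLoad is not set")
          return 1
      percent = load * 100
      msg = "%s CPU, load %.1f%%" % (bean["AvailableProcessors"], percent)
      if percent > crit:
          print("%s > %s%% : CRITICAL" % (msg, crit))
          return 2
      if percent > warn:
          print("%s > %s%% : WARNING" % (msg, warn))
          return 1
      print("%s < %s%% : OK" % (msg, warn))
      return 0

  if __name__ == "__main__":
      sys.exit(check_cpu("nn.example.com", 50070, warn=80, crit=90))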

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu.pl b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu.pl
deleted file mode 100644
index a5680f7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu.pl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/perl -w 
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-use strict;
-use Net::SNMP;
-use Getopt::Long;
-
-# Variable
-my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
-my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
-my $o_host = 	undef;
-my $o_community = undef;
-my $o_warn=	undef;
-my $o_crit=	undef;
-my $o_timeout = 15;
-my $o_port = 161;
-
-sub Usage {
-    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
-}
-
-Getopt::Long::Configure ("bundling");
-GetOptions(
-  'H:s'   => \$o_host,	
-  'C:s'   => \$o_community,	
-  'c:s'   => \$o_crit,        
-  'w:s'   => \$o_warn
-          );
-if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
-  Usage();
-  exit 3;
-}
-$o_warn =~ s/\%//g; 
-$o_crit =~ s/\%//g;
-alarm ($o_timeout);
-$SIG{'ALRM'} = sub {
- print "Unable to contact host: $o_host\n";
- exit 3;
-};
-
-# Connect to host
-my ($session,$error);
-($session, $error) = Net::SNMP->session(
-		-hostname  => $o_host,
-		-community => $o_community,
-		-port      => $o_port,
-		-timeout   => $o_timeout
-	  );
-if (!defined($session)) {
-   printf("Error opening session: %s.\n", $error);
-   exit 3;
-}
-
-my $exit_val=undef;
-my $resultat =  (Net::SNMP->VERSION < 4) ?
-	  $session->get_table($base_proc)
-	: $session->get_table(Baseoid => $base_proc);
-
-if (!defined($resultat)) {
-   printf("ERROR: Description table : %s.\n", $session->error);
-   $session->close;
-   exit 3;
-}
-
-$session->close;
-
-my ($cpu_used,$ncpu)=(0,0);
-foreach my $key ( keys %$resultat) {
-  if ($key =~ /$proc_load/) {
-    $cpu_used += $$resultat{$key};
-    $ncpu++;
-  }
-}
-
-if ($ncpu==0) {
-  print "Can't find CPU usage information : UNKNOWN\n";
-  exit 3;
-}
-
-$cpu_used /= $ncpu;
-
-print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
-printf(" %.1f%%",$cpu_used);
-$exit_val=0;
-
-if ($cpu_used > $o_crit) {
- print " > $o_crit% : CRITICAL\n";
- $exit_val=2;
-} else {
-  if ($cpu_used > $o_warn) {
-   print " > $o_warn% : WARNING\n";
-   $exit_val=1;
-  }
-}
-print " < $o_warn% : OK\n" if ($exit_val eq 0);
-exit $exit_val;
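
The Perl variant walks hrProcessorLoad (1.3.6.1.2.1.25.3.3.1.2) over SNMP and averages one
load value per CPU row. The aggregation itself is trivial once the table is fetched; a
pure-Python sketch, with a hard-coded table standing in for the SNMP walk:

  PROC_LOAD_OID = "1.3.6.1.2.1.25.3.3.1.2"

  # What a get_table on 1.3.6.1.2.1.25.3.3.1 might return:
  # one hrProcessorLoad row per CPU (values in percent).
  table = {
      PROC_LOAD_OID + ".1": 42,
      PROC_LOAD_OID + ".2": 58,
      "1.3.6.1.2.1.25.3.3.1.1.1": 0,   # non-load row, ignored
  }

  loads = [v for oid, v in table.items() if oid.startswith(PROC_LOAD_OID + ".")]
  if not loads:
      print("Can't find CPU usage information : UNKNOWN")
  else:
      avg = sum(loads) / len(loads)
      print("%d CPU, %s %.1f%%" % (len(loads),
                                   "load" if len(loads) == 1 else "average load",
                                   avg))
      # prints: 2 CPU, average load 50.0%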

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu_ha.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu_ha.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu_ha.php
deleted file mode 100644
index 91a7c64..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu_ha.php
+++ /dev/null
@@ -1,116 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:k:r:t:u:e");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  $jmx_response_available = false;
-  $jmx_response;
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=java.lang:type=OperatingSystem",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-
-    $object = $json_array['beans'][0];
-
-    if (count($object) > 0) {
-      $jmx_response_available = true;
-      $jmx_response = $object;
-    }
-  }
-
-  if ($jmx_response_available === false) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }
-
-  $cpu_load = $jmx_response['SystemCpuLoad'];
-
-  if (!isset($jmx_response['SystemCpuLoad']) || $cpu_load < 0.0) {
-    echo "WARNING: Data unavailable, SystemCpuLoad is not set\n";
-    exit(1);
-  }
-
-  $cpu_count = $jmx_response['AvailableProcessors'];
-
-  $cpu_percent = $cpu_load*100;
-
-  $out_msg = $cpu_count . " CPU, load " . number_format($cpu_percent, 1, '.', '') . '%';
-
-  if ($cpu_percent > $crit) {
-    echo $out_msg . ' > ' . $crit . "% : CRITICAL\n";
-    exit(2);
-  }
-  if ($cpu_percent > $warn) {
-    echo $out_msg . ' > ' . $warn . "% : WARNING\n";
-    exit(1);
-  }
-
-  echo $out_msg . ' < ' . $warn . "% : OK\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab_path -r principal_name -t kinit_path -u security_enabled -e ssl_enabled\n";
-  }
-?>
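
The _ha variant differs from check_cpu.php mainly in failure handling: it polls every
comma-separated host and keeps the bean from the last one that answered, going CRITICAL
only when none did. The selection logic, isolated:

  def pick_bean(hosts, fetch):
      """fetch(host) returns a JMX bean dict or None; keep the last success."""
      chosen = None
      for host in hosts.split(","):
          bean = fetch(host)
          if bean:
              chosen = bean
      return chosen  # None => "Data inaccessible" / exit 2 in the plugin

  # e.g. pick_bean("nn1.example.com,nn2.example.com", my_jmx_fetch)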

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_datanode_storage.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_datanode_storage.php
deleted file mode 100644
index dee22b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_datanode_storage.php
+++ /dev/null
@@ -1,100 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to master node, get the jmx-json document
- * check the storage capacity remaining on local datanode storage
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  $cap_remain = $object['Remaining']; /* Total capacity - any extenal files created in data directories by non-hadoop app */
-  $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }  
-  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
-
-  $out_msg = "Capacity:[" . $cap_total . 
-             "], Remaining Capacity:[" . $cap_remain . 
-             "], percent_full:[" . $percent_full  . "]";
-  
-  if ($percent_full > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent_full > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
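
One quirk worth noting in the deleted plugin: it reads $object['Remaining'] and
$object['Capacity'] before the count($object) == 0 guard, so an empty bean produces PHP
notices ahead of the CRITICAL exit. A sketch with the guard first, plus the same
percent-full math:

  def datanode_storage_status(bean, warn, crit):
      if not bean or not bean.get("Capacity"):
          return 2, "CRITICAL: Data inaccessible"
      remaining, total = bean["Remaining"], bean["Capacity"]
      percent_full = (total - remaining) / total * 100
      msg = "Capacity:[%s], Remaining Capacity:[%s], percent_full:[%s]" % (
          total, remaining, percent_full)
      if percent_full > crit:
          return 2, "CRITICAL: " + msg
      if percent_full > warn:
          return 1, "WARNING: " + msg
      return 0, "OK: " + msg

  print(datanode_storage_status({"Remaining": 25, "Capacity": 100}, 80, 90))
  # (0, 'OK: Capacity:[100], Remaining Capacity:[25], percent_full:[75.0]')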

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_blocks.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_blocks.php
deleted file mode 100644
index ca52ccc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_blocks.php
+++ /dev/null
@@ -1,102 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to master node, get the jmx-json document
- * check the corrupt or missing blocks % is > threshod
- * check_jmx -H hostaddress -p port -w 1% -c 1%
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:s:e:k:r:t:u:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $nn_jmx_property=$options['s'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $m_percent = 0;
-    $object = $json_array['beans'][0];
-    $missing_blocks = $object['MissingBlocks'];
-    $total_blocks = $object['BlocksTotal'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    if($total_blocks == 0) {
-      $m_percent = 0;
-    } else {
-      $m_percent = ($missing_blocks/$total_blocks)*100;
-      break;
-    }
-  }
-  $out_msg = "missing_blocks:<" . $missing_blocks .
-             ">, total_blocks:<" . $total_blocks . ">";
-
-  if ($m_percent > 0) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -s <namenode bean name> -k keytab path -r principal name -t kinit path -u security enabled -e ssl enabled\n";
-  }
-?>
\ No newline at end of file
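
A subtle detail in the blocks check above: the host loop only breaks once a NameNode
reports a non-zero BlocksTotal, so hosts reporting zero totals are skipped in favor of the
next one. Isolated:

  def missing_percent(hosts, fetch):
      """fetch(host) -> (missing_blocks, total_blocks); skip zero-total hosts."""
      missing = total = 0
      for host in hosts.split(","):
          missing, total = fetch(host)
          if total != 0:
              break          # first host with real totals wins
      return (missing / total) * 100 if total else 0.0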

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_capacity.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_capacity.php
deleted file mode 100644
index af72723..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_capacity.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to master node, get the jmx-json document
- * check the % HDFS capacity used >= warn and critical limits.
- * check_jmx -H hostaddress -p port -w 1 -c 1
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $percent = 0;
-    $object = $json_array['beans'][0];
-    $CapacityUsed = $object['CapacityUsed'];
-    $CapacityRemaining = $object['CapacityRemaining'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    $CapacityTotal = $CapacityUsed + $CapacityRemaining;
-    if($CapacityTotal == 0) {
-      $percent = 0;
-    } else {
-      $percent = ($CapacityUsed/$CapacityTotal)*100;
-      break;
-    }
-  }
-  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
-             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
-
-  if ($percent >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
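
Note the comparison difference: this capacity check trips at percent >= warn/crit, while
the CPU checks above use strict >. The shared classification step, parameterized:

  def classify(percent, warn, crit, inclusive=True):
      """Return a Nagios exit code: 0 OK, 1 WARNING, 2 CRITICAL."""
      over = (lambda a, b: a >= b) if inclusive else (lambda a, b: a > b)
      if over(percent, crit):
          return 2
      if over(percent, warn):
          return 1
      return 0

  print(classify(80.0, 80, 90))                  # 1 (>= warn)
  print(classify(80.0, 80, 90, inclusive=False)) # 0 (strict >)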

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hive_metastore_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hive_metastore_status.sh
deleted file mode 100644
index 640c077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hive_metastore_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#The uri is of the form thrift://<hostname>:<port>
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
-export JAVA_HOME=$JAVA_HOME
-out=`hcat $HCAT_URL -e "show databases" 2>&1`
-if [[ "$?" -ne 0 ]]; then
-  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
-  exit 2;
-fi
-echo "OK: Hive Metastore status OK";
-exit 0;
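
The metastore check is a two-step probe: kinit with the Nagios keytab when security is on,
then run hcat -e "show databases" against the thrift URI and map the exit status. A
subprocess sketch of the same flow (keytab path and principal are placeholders):

  import subprocess

  def check_metastore(host, port, security_enabled=False,
                      keytab=None, principal=None, kinit_path="kinit"):
      if security_enabled:
          rc = subprocess.call([kinit_path, "-kt", keytab, principal])
          if rc != 0:
              return 2, "CRITICAL: Error doing kinit for nagios"
      uri = "-Dhive.metastore.uris=thrift://%s:%s" % (host, port)
      rc = subprocess.call(["hcat", uri, "-e", "show databases"])
      if rc != 0:
          return 2, "CRITICAL: Error accessing Hive Metastore status"
      return 0, "OK: Hive Metastore status OK"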

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hive_thrift_port.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hive_thrift_port.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hive_thrift_port.py
deleted file mode 100644
index c9414f7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hive_thrift_port.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-import os
-import optparse
-import json
-import traceback
-from resource_management import *
-from time import time
-
-
-OK_MESSAGE = "TCP OK - %.3f second response time on port %s"
-CRITICAL_MESSAGE = "Connection to %s on port %s failed"
-
-def main():
-
-  parser = optparse.OptionParser()
-
-  parser.add_option("-H", "--host", dest="address", help="Hive thrift host")
-  parser.add_option("-p", "--port", type="int", dest="port", help="Hive thrift port")
-  parser.add_option("--security-enabled", action="store_true", dest="security_enabled")
-
-  (options, args) = parser.parse_args()
-
-  if options.address is None:
-    print "Specify hive thrift host (--host or -H)"
-    exit(-1)
-
-  if options.port is None:
-    print "Specify hive thrift port (--port or -p)"
-    exit(-1)
-
-  if options.security_enabled:
-    security_enabled = options.security_enabled
-  else:
-    security_enabled = False
-
-  address = options.address
-  port = options.port
-
-  starttime = time()
-  if check_thrift_port_sasl(address, port, security_enabled=security_enabled):
-    timetaken = time() - starttime
-    print OK_MESSAGE % (timetaken, port)
-    exit(0)
-  else:
-    print CRITICAL_MESSAGE % (address, port)
-    exit(2)
-
-
-if __name__ == "__main__":
-  main()
-
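
check_hive_thrift_port.py leans on resource_management's check_thrift_port_sasl for the
actual probe and only adds timing and option parsing. For the non-SASL case the probe
reduces to a timed TCP connect; a standalone sketch:

  import socket
  import time

  def timed_tcp_check(address, port, timeout=5.0):
      start = time.time()
      try:
          with socket.create_connection((address, port), timeout=timeout):
              pass
      except OSError:
          return 2, "Connection to %s on port %s failed" % (address, port)
      return 0, "TCP OK - %.3f second response time on port %s" % (
          time.time() - start, port)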

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hue_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hue_status.sh
deleted file mode 100644
index 076d9b3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hue_status.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-status=`/etc/init.d/hue status 2>&1`
-
-if [[ "$?" -ne 0 ]]; then
-	echo "WARNING: Hue is stopped";
-	exit 1;
-fi
-
-echo "OK: Hue is running";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_mapred_local_dir_used.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
deleted file mode 100644
index 15c85eb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-MAPRED_LOCAL_DIRS=$1
-CRITICAL=`echo $2 | cut -d % -f 1`
-IFS=","
-for mapred_dir in $MAPRED_LOCAL_DIRS
-do
-  percent=`df -hl $mapred_dir | awk '{percent=$5;} END{print percent}' | cut -d % -f 1`
-  if [ $percent -ge $CRITICAL ]; then
-    echo "CRITICAL: MapReduce local dir is full."
-    exit 2
-  fi
-done
-echo "OK: MapReduce local dir space is available."
-exit 0
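
The local-dir check shells out to df and scrapes the Use% column; the same measurement is
available directly from the standard library. A sketch over a comma-separated dir list,
matching the script's CRITICAL-only semantics:

  import shutil

  def check_local_dirs(mapred_local_dirs, critical_percent):
      for d in mapred_local_dirs.split(","):
          usage = shutil.disk_usage(d)
          percent = usage.used / usage.total * 100
          if percent >= critical_percent:
              return 2, "CRITICAL: MapReduce local dir is full."
      return 0, "OK: MapReduce local dir space is available."

  print(check_local_dirs("/tmp", 99))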

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_name_dir_status.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_name_dir_status.php
deleted file mode 100644
index 186166d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_name_dir_status.php
+++ /dev/null
@@ -1,93 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to namenode, get the jmx-json document
- * check the NameDirStatuses to find any offline (failed) directories
- * check_jmx -H hostaddress -p port -k keytab path -r principal name -t kinit path -s security enabled
- */
- 
-  include "hdp_nagios_init.php";
-
-  $options = getopt("h:p:e:k:r:t:s:");
-  //Check only for mandatory options
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-  
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if ($object['NameDirStatuses'] == "") {
-    echo "WARNING: NameNode directory status not available via ".$protocol."://".$host.":".$port."/jmx url, code " . $info['http_code'] ."\n";
-    exit(1);
-  }
-  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
-  $failed_dir_count = count($NameDirStatuses['failed']);
-  $out_msg = "CRITICAL: Offline NameNode directories: ";
-  if ($failed_dir_count > 0) {
-    foreach ($NameDirStatuses['failed'] as $key => $value) {
-      $out_msg = $out_msg . $key . ":" . $value . ", ";
-    }
-    echo $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: All NameNode directories are active" . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled";
-  }
-?>
\ No newline at end of file
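
The name-dir plugin has to decode twice: NameDirStatuses arrives as a JSON string embedded
inside the JMX JSON document. Isolated (the directory path and status value are
illustrative):

  import json

  inner = json.dumps({"active": {"/hadoop/hdfs/namenode": "IMAGE_AND_EDITS"},
                      "failed": {}})
  jmx_doc = json.dumps({"beans": [{"NameDirStatuses": inner}]})

  bean = json.loads(jmx_doc)["beans"][0]
  statuses = json.loads(bean["NameDirStatuses"])   # second decode: string -> dict
  failed = statuses["failed"]
  print("CRITICAL" if failed else "OK: All NameNode directories are active")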

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_namenodes_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_namenodes_ha.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_namenodes_ha.sh
deleted file mode 100644
index 83c1aca..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_namenodes_ha.sh
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-IFS=',' read -a namenodes <<< "$1"
-port=$2
-totalNN=${#namenodes[@]}
-activeNN=()
-standbyNN=()
-unavailableNN=()
-
-for nn in "${namenodes[@]}"
-do
-  export no_proxy=$nn
-  status=$(curl -m 5 -s http://$nn:$port/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem | grep -i "tag.HAState" | grep -o -E "standby|active")
-  if [ "$status" == "active" ]; then
-    activeNN[${#activeNN[*]}]="$nn"
-  elif [ "$status" == "standby" ]; then
-    standbyNN[${#standbyNN[*]}]="$nn"
-  elif [ "$status" == "" ]; then
-    unavailableNN[${#unavailableNN[*]}]="$nn"
-  fi
-done
-
-message=""
-critical=false
-
-if [ ${#activeNN[@]} -gt 1 ]; then
-  critical=true
-  message=$message" Only one NN can have HAState=active;"
-elif [ ${#activeNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Active NN available;"
-elif [ ${#standbyNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Standby NN available;"
-fi
-
-NNstats=" Active<"
-for nn in "${activeNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Standby<"
-for nn in "${standbyNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Unavailable<"
-for nn in "${unavailableNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">"
-
-if [ $critical == false ]; then
-  echo "OK: NameNode HA healthy;"$NNstats
-  exit 0
-fi
-
-echo "CRITICAL:"$message$NNstats
-exit 2

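The HA check above reduces to three buckets: poll each NameNode's FSNamesystem JMX bean, classify hosts by tag.HAState, and alert unless exactly one NameNode is active and at least one is standby. A hedged Python 2 equivalent (timeout handling and message layout simplified):

import re
import sys
import urllib2

def ha_states(namenodes, port):
  states = {'active': [], 'standby': [], 'unavailable': []}
  for nn in namenodes:
    url = ('http://%s:%s/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem'
           % (nn, port))
    try:
      body = urllib2.urlopen(url, timeout=5).read()
    except Exception:
      body = ''
    m = re.search(r'"tag\.HAState"\s*:\s*"(active|standby)"', body)
    states[m.group(1) if m else 'unavailable'].append(nn)
  return states

if __name__ == '__main__':
  s = ha_states(sys.argv[1].split(','), sys.argv[2])
  critical = len(s['active']) != 1 or not s['standby']
  print '%s Active<%s>, Standby<%s>, Unavailable<%s>' % (
      'CRITICAL:' if critical else 'OK: NameNode HA healthy;',
      ';'.join(s['active']), ';'.join(s['standby']),
      ';'.join(s['unavailable']))
  sys.exit(2 if critical else 0)
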
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_nodemanager_health.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_nodemanager_health.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_nodemanager_health.sh
deleted file mode 100644
index eedcd62..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_nodemanager_health.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HOST=$1
-PORT=$2
-NODEMANAGER_URL="http://$HOST:$PORT/ws/v1/node/info"
-SEC_ENABLED=$3
-export PATH="/usr/bin:$PATH"
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$4
-  NAGIOS_USER=$5
-  KINIT_PATH=$6
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-
-export no_proxy=$HOST
-RESPONSE=`curl --negotiate -u : -s $NODEMANAGER_URL`
-if [[ "$RESPONSE" == *'"nodeHealthy":true'* ]]; then 
-  echo "OK: NodeManager healthy";
-  exit 0;
-fi
-echo "CRITICAL: NodeManager unhealthy";
-exit 2;

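Minus the kinit step, the NodeManager check is one REST call: fetch /ws/v1/node/info and look for "nodeHealthy":true in the body, exactly as the grep-style match above does. A minimal Python 2 sketch:

import sys
import urllib2

def nodemanager_healthy(host, port):
  url = 'http://%s:%s/ws/v1/node/info' % (host, port)
  # Substring match mirrors the shell script; a stricter variant would
  # parse the JSON and inspect the nodeHealthy field instead.
  return '"nodeHealthy":true' in urllib2.urlopen(url, timeout=5).read()

if __name__ == '__main__':
  if nodemanager_healthy(sys.argv[1], sys.argv[2]):
    print 'OK: NodeManager healthy'
    sys.exit(0)
  print 'CRITICAL: NodeManager unhealthy'
  sys.exit(2)
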
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_oozie_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_oozie_status.sh
deleted file mode 100644
index 820ee99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_oozie_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# OOZIE_URL is of the form http://<hostname>:<port>/oozie
-HOST=`echo $1 | tr '[:upper:]' '[:lower:]'`
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-OOZIE_URL="http://$HOST:$PORT/oozie"
-export JAVA_HOME=$JAVA_HOME
-out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing Oozie Server status [$out]";
-  exit 2;
-fi
-echo "OK: Oozie Server status [$out]";
-exit 0;

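The Oozie check shells out to the oozie CLI and turns any non-zero exit status into a CRITICAL alert. A hedged Python 2 sketch of the unsecured path (subprocess.check_output needs Python 2.7; mm_wrapper.py further below backports it for 2.6):

import subprocess
import sys

def check_oozie(host, port):
  url = 'http://%s:%s/oozie' % (host.lower(), port)
  try:
    out = subprocess.check_output(
        ['oozie', 'admin', '-oozie', url, '-status'],
        stderr=subprocess.STDOUT)
  except (OSError, subprocess.CalledProcessError), e:
    print 'CRITICAL: Error accessing Oozie Server status [%s]' % e
    return 2
  print 'OK: Oozie Server status [%s]' % out.strip()
  return 0

if __name__ == '__main__':
  sys.exit(check_oozie(sys.argv[1], sys.argv[2]))
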
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_rpcq_latency.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_rpcq_latency.php
deleted file mode 100644
index 463f69b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_rpcq_latency.php
+++ /dev/null
@@ -1,104 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node and gets the JMX JSON document.
- * It checks the RPC wait time in the queue, RpcQueueTime_avg_time.
- * check_rpcq_latency -h hostaddress -p port -n ServiceName -w 1 -c 1
- * Warning and Critical values are in seconds
- * Service Name = JobTracker, NameNode, JobHistoryServer
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w'];
-  $crit=$options['c'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  } 
-  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
-  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
-             "> Secs";
-
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

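Stripped of the kinit/SSL plumbing, the RPC latency plugin compares RpcQueueTime_avg_time from the RpcActivityForPort* bean against warning and critical thresholds given in seconds. A hedged Python 2 rendering:

import json
import sys
import urllib2

def check_rpcq_latency(host, port, service, warn, crit):
  url = ('http://%s:%s/jmx?qry=Hadoop:service=%s,name=RpcActivityForPort*'
         % (host, port, service))
  bean = json.load(urllib2.urlopen(url))['beans'][0]
  queue = round(bean['RpcQueueTime_avg_time'], 2)
  proc = round(bean['RpcProcessingTime_avg_time'], 2)
  msg = ('RpcQueueTime_avg_time:<%s> Secs, RpcProcessingTime_avg_time:<%s>'
         ' Secs' % (queue, proc))
  if queue >= crit:
    print 'CRITICAL: ' + msg
    return 2
  if queue >= warn:
    print 'WARNING: ' + msg
    return 1
  print 'OK: ' + msg
  return 0

if __name__ == '__main__':
  sys.exit(check_rpcq_latency(sys.argv[1], sys.argv[2], sys.argv[3],
                              float(sys.argv[4]), float(sys.argv[5])))
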
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_templeton_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_templeton_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_templeton_status.sh
deleted file mode 100644
index 3e2ba0f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_templeton_status.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# out='{"status":"ok","version":"v1"}<status_code:200>'
-HOST=$1
-PORT=$2
-VERSION=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then 
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-regex="^.*\"status\":\"ok\".*<status_code:200>$"
-export no_proxy=$HOST
-out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
-if [[ $out =~ $regex ]]; then
-  out=`echo "$out" | sed -e 's/{/[/g' | sed -e 's/}/]/g'` 
-  echo "OK: WebHCat Server status [$out]";
-  exit 0;
-fi
-echo "CRITICAL: Error accessing WebHCat Server, status [$out]";
-exit 2;

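The WebHCat (Templeton) check needs two things from GET /templeton/<version>/status: an HTTP 200 and "status":"ok" somewhere in the body. A minimal Python 2 sketch of the unsecured path:

import sys
import urllib2

def check_templeton(host, port, version='v1'):
  url = 'http://%s:%s/templeton/%s/status' % (host, port, version)
  try:
    resp = urllib2.urlopen(url, timeout=5)
    return resp.getcode() == 200 and '"status":"ok"' in resp.read()
  except Exception:
    return False

if __name__ == '__main__':
  if check_templeton(sys.argv[1], sys.argv[2], sys.argv[3]):
    print 'OK: WebHCat Server status'
    sys.exit(0)
  print 'CRITICAL: Error accessing WebHCat Server'
  sys.exit(2)
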
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_webui.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_webui.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_webui.sh
deleted file mode 100644
index f1f6641..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_webui.sh
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-service=$1
-host=$2
-port=$3
-
-checkurl () {
-  url=$1
-  export no_proxy=$host
-  curl $url -k -o /dev/null
-  echo $?
-}
-
-if [[ -z "$service" || -z "$host" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
-  exit 3;
-fi
-
-case "$service" in
-
-jobtracker) 
-    jtweburl="http://$host:$port"
-    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
-      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
-      exit 1;
-    fi
-    ;;
-namenode)
-    nnweburl="http://$host:$port"
-    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
-      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
-      exit 1;
-    fi
-    ;;
-jobhistory)
-    jhweburl="http://$host:$port/jobhistoryhome.jsp"
-    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $jhweburl";
-      exit 1;
-    fi
-    ;;
-hbase)
-    hbaseweburl="http://$host:$port/master-status"
-    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
-      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
-      exit 1;
-    fi
-    ;;
-resourcemanager)
-    rmweburl="http://$host:$port/cluster"
-    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
-      echo "WARNING: ResourceManager Web UI not accessible : $rmweburl";
-      exit 1;
-    fi
-    ;;
-historyserver2)
-    hsweburl="http://$host:$port/jobhistory"
-    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $hsweburl";
-      exit 1;
-    fi
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode|resourcemanager|historyserver2]"
-   exit 3
-   ;;
-esac
-
-echo "OK: Successfully accessed $service Web UI"
-exit 0;

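check_webui.sh is a table lookup plus a reachability probe: each service name maps to a UI path, and any failure to fetch it yields a WARNING. The same shape in Python 2 (the path table transcribes the case statement above):

import sys
import urllib2

PATHS = {
  'jobtracker': '', 'namenode': '', 'jobhistory': '/jobhistoryhome.jsp',
  'hbase': '/master-status', 'resourcemanager': '/cluster',
  'historyserver2': '/jobhistory',
}

def check_webui(service, host, port):
  if service not in PATHS:
    print 'UNKNOWN: Invalid service name [%s], valid options [%s]' % (
        service, '|'.join(sorted(PATHS)))
    return 3
  url = 'http://%s:%s%s' % (host, port, PATHS[service])
  try:
    urllib2.urlopen(url, timeout=5)
  except Exception:
    print 'WARNING: %s Web UI not accessible : %s' % (service, url)
    return 1
  print 'OK: Successfully accessed %s Web UI' % service
  return 0

if __name__ == '__main__':
  sys.exit(check_webui(sys.argv[1], sys.argv[2], sys.argv[3]))
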
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/hdp_nagios_init.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/hdp_nagios_init.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/hdp_nagios_init.php
deleted file mode 100644
index 487eb43..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/hdp_nagios_init.php
+++ /dev/null
@@ -1,81 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Common functions called from other alerts
- *
- */
- 
- /*
- * Function for kinit. Checks whether security is enabled and klist shows no
- * ticket for this principal; makes a kinit call in that case.
- */
-  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
-    if($security_enabled === 'true') {
-    
-      $is_logined = is_logined($principal_name);
-      
-      if (!$is_logined)
-        $status = kinit($kinit_path_local, $keytab_path, $principal_name);
-      else
-        $status = array(0, '');
-    } else {
-      $status = array(0, '');
-    }
-  
-    return $status;
-  }
-  
-  
-  /*
-  * Checks if the user is logged in to Kerberos
-  */
-  function is_logined($principal_name) {
-    $check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
-    $check_output =  shell_exec($check_cmd);
-    
-    if ($check_output)
-      return false;
-    else
-      return true;
-  }
-
-  /*
-  * Runs kinit command.
-  */
-  function kinit($kinit_path_local, $keytab_path, $principal_name) {
-    $init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
-    $kinit_output = shell_exec($init_cmd);
-    if ($kinit_output) 
-      $status = array(1, $kinit_output);
-    else
-      $status = array(0, '');
-      
-    return $status;
-  }
-
-  function logout() {
-    if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
-      $status = true;
-    else
-      $status = false;
-      
-    return $status;
-  }
- 
- ?>
\ No newline at end of file

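The helper library above settles on one convention: every kinit attempt yields an (exit status, output) pair, and kinit only runs when security is on and klist shows no ticket for the principal. A hedged Python 2 translation (ticket detection by substring as in is_logined(); failure detection here uses the kinit return code rather than the presence of output):

import subprocess

def kinit_if_needed(security_enabled, kinit_path, keytab_path, principal):
  if security_enabled != 'true':
    return (0, '')
  klist = subprocess.Popen(['klist'], stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
  out, _ = klist.communicate()
  if principal in out:  # a ticket is already cached
    return (0, '')
  kinit = subprocess.Popen([kinit_path, '-kt', keytab_path, principal],
                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  out, _ = kinit.communicate()
  if kinit.returncode != 0:
    return (1, out)
  return (0, '')
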
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/mm_wrapper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/mm_wrapper.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/mm_wrapper.py
deleted file mode 100644
index bd7a94f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/mm_wrapper.py
+++ /dev/null
@@ -1,334 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-import sys
-import subprocess
-import os
-
-N_SGN = 'NAGIOS_SERVICEGROUPNAME'
-N_SD = 'NAGIOS__SERVICEHOST_COMPONENT'
-N_HOST = 'NAGIOS_HOSTNAME'
-
-LIST_SEPARATOR = "--"
-HOSTNAME_PLACEHOLDER = "^^"
-IGNORE_DAT_FILE = "/var/nagios/ignore.dat"
-
-# Mode constants
-OR = 0
-AND = 1
-ENV_ONLY = 2
-FILTER_MM = 3
-LEGACY_CHECK_WRAPPER = 4
-MODES = ['or', 'and', 'env_only', 'filter_mm', 'legacy_check_wrapper']
-
-
-def ignored_host_list(service, component):
-  """
-  :param service: current service
-  :param component: current component
-  :return: all hosts where specified host component is in ignored state
-  """
-  def str_norm(s):
-    return s.strip().upper()
-
-  result = []
-
-  try:
-    with open(IGNORE_DAT_FILE, 'r') as f:
-      lines = filter(None, f.read().split(os.linesep))
-  except IOError:
-    return result
-
-  if lines:
-    for l in lines:
-      tokens = l.split(' ')
-      if len(tokens) == 3 and str_norm(tokens[1]) == str_norm(service) \
-          and str_norm(tokens[2]) == str_norm(component):
-        result.append(tokens[0])
-  return result
-
-
-def get_real_service():
-  try:
-    service = os.environ[N_SGN].strip().upper()  # e.g. 'HBASE'
-  except KeyError:
-    service = ''
-  return service
-
-
-def get_real_component():
-  try:
-    comp_name = os.environ[N_SD].strip()
-  except KeyError:
-    comp_name = ''
-  mapping = {
-    'HBASEMASTER': 'HBASE_MASTER',
-    'REGIONSERVER': 'HBASE_REGIONSERVER',
-    'JOBHISTORY': 'MAPREDUCE2',
-    'HIVE-METASTORE': 'HIVE_METASTORE',
-    'HIVE-SERVER': 'HIVE_SERVER',
-    'FLUME': 'FLUME_HANDLER',
-    'HUE': 'HUE_SERVER',
-    'WEBHCAT': 'WEBHCAT_SERVER',
-  }
-  if comp_name in mapping:
-    comp_name = mapping.get(comp_name)
-  return comp_name
-
-
-def check_output(*popenargs, **kwargs):
-  """
-  Imitate subprocess.check_output() for python 2.6
-  """
-  process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                             *popenargs, **kwargs)
-  output, unused_err = process.communicate()
-  retcode = process.poll()
-  if retcode:
-    cmd = kwargs.get("args")
-    if cmd is None:
-      cmd = popenargs[0]
-    err = subprocess.CalledProcessError(retcode, cmd)
-    # Monkey-patching for python 2.6
-    err.output = output
-    raise err
-  return output
-
-
-def print_usage():
-  """
-  Prints usage and exits with a non-zero exit code
-  """
-  print "Usage: mm_wrapper.py MODE HOST1 HOST2 .. HOSTN %s command arg1 arg2 .. argN" % LIST_SEPARATOR
-  print "MODE is one of the following: or, and, env_only, filter_mm, legacy_check_wrapper"
-  print "%s is a separator between list of hostnames and command with args" % LIST_SEPARATOR
-  print "%s is used as a hostname placeholder at command args" % HOSTNAME_PLACEHOLDER
-  print "Also script provides $MM_HOSTS shell variable to commands"
-  print "NOTE: Script makes use of Nagios-populated env vars %s and %s" % (N_SGN, N_SD)
-  print "For more info, please see docstrings at %s" % os.path.realpath(__file__)
-  sys.exit(1)
-
-
-def parse_args(args):
-  if not args or not LIST_SEPARATOR in args or args[0] not in MODES:
-    print_usage()
-  else:
-    mode = MODES.index(args[0])  # identify operation mode
-    args = args[1:]  # Shift args left
-    hostnames = []
-    command_line = []
-    # Parse command line args
-    passed_separator = False  # True if met LIST_SEPARATOR
-    for arg in args:
-      if not passed_separator:
-        if arg != LIST_SEPARATOR:
-          # check whether a list of hosts was passed instead of a single one
-          if ',' in arg:
-            hostnames += arg.split(',')
-          else:
-            hostnames.append(arg)
-        else:
-          passed_separator = True
-      else:
-        if arg != LIST_SEPARATOR:
-          command_line.append(arg)
-        else:  # Something definitely went wrong
-          print "Could not parse arguments: " \
-                "There is more than one %s argument." % LIST_SEPARATOR
-          print_usage()
-
-    if not command_line:
-      print "No command provided."
-      print_usage()
-    return mode, hostnames, command_line
-
-
-def do_work(mode, hostnames, command_line):
-  # Execute commands
-  ignored_hosts = ignored_host_list(get_real_service(), get_real_component())
-  empty_check_result = {
-    'message': 'No checks have been run (no hostnames provided)',
-    'retcode': -1,
-    'real_retcode': None
-  }
-  custom_env = os.environ.copy()
-  if ignored_hosts:
-    custom_env['MM_HOSTS'] = \
-      reduce(lambda a, b: "%s %s" % (a, b), ignored_hosts)
-  if mode == OR:
-    check_result = work_in_or_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result)
-  elif mode == AND:
-    check_result = work_in_and_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result)
-  elif mode == ENV_ONLY:
-    check_result = work_in_env_only_mode(hostnames, command_line, custom_env)
-  elif mode == FILTER_MM:
-    check_result = work_in_filter_mm_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result)
-  else:  # mode == LEGACY_CHECK_WRAPPER:
-    check_result = work_in_legacy_check_wrapper_mode(ignored_hosts, command_line, custom_env)
-  # Build the final output
-  final_output = []
-  output = check_result.get('message')
-  if output is not None:
-    for string in output.splitlines():
-      final_output.append(string.strip())
-  real_retcode = check_result.get('real_retcode')
-  if real_retcode:
-    # This string is used at check_aggregate.php when aggregating alerts
-    final_output.append("AMBARIPASSIVE=%s" % real_retcode)
-  return final_output, check_result.get('retcode')
-
-
-def work_in_or_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result):
-  check_result = empty_check_result
-  for hostname in hostnames:
-    concrete_command_line = map(  # Substitute hostname where needed
-                                  lambda x: hostname if x == HOSTNAME_PLACEHOLDER else x,
-                                  command_line)
-    try:
-      returncode = 0
-      real_retcode = None
-      message = check_output(concrete_command_line, env=custom_env)
-    except subprocess.CalledProcessError, e:
-      if hostname not in ignored_hosts:
-        returncode = e.returncode
-      else:  # Host is in MM
-        real_retcode = e.returncode
-      message = e.output
-    really_positive_result = hostname not in ignored_hosts and returncode == 0
-    if check_result.get('retcode') <= returncode or really_positive_result:
-      check_result = {
-        'message': message,
-        'retcode': returncode,
-        'real_retcode': real_retcode  # Real (not suppressed) program retcode
-      }
-    if really_positive_result:
-      break  # Exit on first real success
-  return check_result
-
-
-def work_in_and_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result):
-  check_result = empty_check_result
-  for hostname in hostnames:
-    concrete_command_line = map(  # Substitute hostname where needed
-                                  lambda x: hostname if x == HOSTNAME_PLACEHOLDER else x,
-                                  command_line)
-    try:
-      returncode = 0
-      real_retcode = None
-      message = check_output(concrete_command_line, env=custom_env)
-    except subprocess.CalledProcessError, e:
-      if hostname not in ignored_hosts:
-        returncode = e.returncode
-      else:
-        real_retcode = e.returncode
-      message = e.output
-    if check_result.get('retcode') <= returncode:
-      check_result = {
-        'message': message,
-        'retcode': returncode,
-        'real_retcode': real_retcode  # Real (not suppressed) program retcode
-      }
-  return check_result
-
-
-def work_in_env_only_mode(hostnames, command_line, custom_env):
-  concrete_command_line = []
-  for item in command_line:
-    if item == HOSTNAME_PLACEHOLDER:
-      concrete_command_line.extend(hostnames)
-    else:
-      concrete_command_line.append(item)
-  try:
-    returncode = 0
-    message = check_output(concrete_command_line, env=custom_env)
-  except subprocess.CalledProcessError, e:
-    returncode = e.returncode
-    message = e.output
-  check_result = {
-    'message': message,
-    'retcode': returncode,
-    'real_retcode': None  # Real (not suppressed) program retcode
-  }
-  return check_result
-
-
-def work_in_filter_mm_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result):
-  not_mm_hosts = [hostname for hostname in hostnames if hostname not in ignored_hosts]
-  if not not_mm_hosts:  # All hosts have been filtered
-    return empty_check_result
-  else:
-    return work_in_env_only_mode(not_mm_hosts, command_line, custom_env)
-
-
-def work_in_legacy_check_wrapper_mode(ignored_hosts, command_line, custom_env):
-  host = os.environ[N_HOST]
-  result = work_in_env_only_mode([host], command_line, custom_env)
-  real_retcode = result['retcode']
-  if host in ignored_hosts and real_retcode != 0:  # Ignore fail
-    result['retcode'] = 0
-    result['real_retcode'] = real_retcode
-  return result
-
-
-def main():
-  """
-  This script allows running Nagios service check commands for host components
-  located on different hosts.
-  The script also passes every command a $MM_HOSTS shell variable with a list
-  of hosts that are in MM (maintenance mode).
-
-  or mode: return a 0 exit code if at least one service check succeeds.
-  The command exits on the first success.
-  Failures for host components that are in MM are suppressed (the return code
-  is set to 0).
-  If the command fails for all provided hostnames, the script returns the
-  alert with the greatest exit code value.
-
-  and mode:
-  Perform checks of all host components (effectively ignoring negative results
-  for MM components). If the service check succeeds for all hosts, the script
-  returns a zero exit code. Otherwise the alert with the greatest exit code is
-  returned.
-
-  env_only mode:
-  Pass the list of all hosts to the command and run it once. The only role of
-  the mm_wrapper script in this mode is to provide a properly initialized
-  $MM_HOSTS env variable to the command being run. All duties of ignoring
-  failures of MM host components are delegated to the command being run.
-
-  filter_mm mode:
-  Similar to env_only mode. The only difference is that hostnames for
-  host components that are in MM are filtered out (not passed to the command
-  at all).
-
-  legacy_check_wrapper mode:
-  Designed as a drop-in replacement for check_wrapper.sh. It reads the
-  $NAGIOS_HOSTNAME env var and ignores check results if the host component on
-  this host is in MM. When the host substitution symbol is encountered, the
-  hostname defined by $NAGIOS_HOSTNAME is substituted.
-  """
-  args = sys.argv[1:]  # Shift args left
-  mode, hostnames, command_line = parse_args(args)
-  output, ret_code = do_work(mode, hostnames, command_line)
-  for line in output:
-    print line
-  sys.exit(ret_code)
-
-
-if __name__ == "__main__":
-  main()

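A hedged usage illustration for the wrapper above, assuming the module is importable as mm_wrapper (the host names and the echoed command are invented): hostnames come first, then the "--" separator, then the command, with "^^" standing in for each hostname.

import mm_wrapper  # the deleted module above, assumed to be on PYTHONPATH

mode, hosts, cmd = mm_wrapper.parse_args(
    ['or', 'nn1.example.com', 'nn2.example.com', '--',
     '/bin/echo', 'checking', '^^'])
print mm_wrapper.MODES[mode], hosts, cmd
output, retcode = mm_wrapper.do_work(mode, hosts, cmd)
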
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/functions.py
deleted file mode 100644
index 7252f8f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/functions.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-# Returns True if the JDK major version (the x in 1.x) is greater than 6
-def is_jdk_greater_6(java64_home):
-  import os
-  import re
-  java_bin = os.path.join(java64_home, 'bin', 'java')
-  ver_check = shell.call([java_bin, '-version'])
-
-  ver = ''
-  if 0 != ver_check[0]:
-    # java is not local, try the home name as a fallback
-    ver = java64_home
-  else:
-    ver = ver_check[1]
-
-  regex = re.compile('"1\.([0-9]*)\.0_([0-9]*)"', re.IGNORECASE)
-  r = regex.search(ver)
-  if r:
-    strs = r.groups()
-    if 2 == len(strs):
-      minor = int(strs[0])
-      if minor > 6:
-        return True
-
-  return False

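The version sniffing above hinges on a single regular expression over the java -version banner. A quick runnable illustration of what it accepts (the banner strings are invented but follow the JDK's quoting):

import re

regex = re.compile('"1\.([0-9]*)\.0_([0-9]*)"', re.IGNORECASE)
for banner in ('java version "1.6.0_31"', 'java version "1.7.0_45"'):
  minor = int(regex.search(banner).groups()[0])
  print '%s -> greater than 6: %s' % (banner, minor > 6)
# prints False for the 1.6 banner and True for the 1.7 banner
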
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios.py
deleted file mode 100644
index 1f7a04c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from nagios_server_config import nagios_server_config
-
-def nagios():
-  import params
-
-  File( params.nagios_httpd_config_file,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    content = Template("nagios.conf.j2"),
-    mode   = 0644
-  )
-  
-  Directory( params.conf_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-
-  Directory( [params.plugins_dir, params.nagios_obj_dir])
-
-  Directory( params.nagios_pid_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755,
-    recursive = True
-  )
-
-  Directory( [params.nagios_var_dir, params.check_result_path, params.nagios_rw_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    recursive = True
-  )
-  
-  Directory( [params.nagios_log_dir, params.nagios_log_archives_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755
-  )
-
-  nagios_server_config()
-
-  set_web_permisssions()
-
-  File( format("{conf_dir}/command.cfg"),
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-
-  File(format("{nagios_var_dir}/ignore.dat"),
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0664)
-  
-  
-def set_web_permisssions():
-  import params
-
-  cmd = format("{htpasswd_cmd} -c -b  /etc/nagios/htpasswd.users {nagios_web_login} {nagios_web_password!p}")
-  Execute(cmd)
-
-  File( "/etc/nagios/htpasswd.users",
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode  = 0640
-  )
-
-  if System.get_instance().os_family == "suse":
-    command = format("usermod -G {nagios_group} wwwrun")
-  else:
-    command = format("usermod -a -G {nagios_group} apache")
-  
-  Execute( command)

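set_web_permisssions() above couples two OS-specific details: bootstrapping the htpasswd file and adding the distribution's web-server user to the Nagios group (wwwrun on SUSE, apache elsewhere). A hedged plain-Python sketch of the latter branch, with an illustrative group name:

import platform
import subprocess

def add_web_user_to_nagios_group(nagios_group='nagios'):
  # SUSE's Apache runs as 'wwwrun'; most other distros use 'apache'
  if 'suse' in platform.dist()[0].lower():
    cmd = ['usermod', '-G', nagios_group, 'wwwrun']
  else:
    cmd = ['usermod', '-a', '-G', nagios_group, 'apache']
  subprocess.check_call(cmd)
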
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server.py
deleted file mode 100644
index 32b6cd9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from nagios import nagios
-from nagios_service import nagios_service
-
-         
-class NagiosServer(Script):
-  def install(self, env):
-    remove_conflicting_packages()
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    nagios()
-
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    update_ignorable(params)
-
-    self.configure(env) # re-run to update configs after security is enabled
-    nagios_service(action='start')
-
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    
-    nagios_service(action='stop')
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nagios_pid_file)
-    
-def remove_conflicting_packages():  
-  Package( 'hdp_mon_nagios_addons',
-    action = "remove"
-  )
-
-  Package( 'nagios-plugins',
-    action = "remove"
-  )
-
-  Execute( "rpm -e --allmatches --nopostun nagios",
-    path    = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-    ignore_failures = True 
-  )
-
-def update_ignorable(params):
-  if not params.config.has_key('passiveInfo'):
-    return
-  else:
-    buf = ""
-    for define in params.config['passiveInfo']:
-      try:
-        host = str(define['host'])
-        service = str(define['service'])
-        component = str(define['component'])
-        buf += host + " " + service + " " + component + "\n"
-      except KeyError:
-        pass
-
-    f = None
-    try:
-      f = open('/var/nagios/ignore.dat', 'w')
-      f.write(buf)
-    except:
-      pass
-    finally:
-      if f is not None:
-        f.close()
-
-
-if __name__ == "__main__":
-  NagiosServer().execute()

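update_ignorable() above is the producer side of the ignore.dat file that mm_wrapper.py's ignored_host_list() consumes: one "host service component" triple per line. A hedged illustration with an invented passiveInfo entry:

passive_info = [{'host': 'c6401.ambari.apache.org',
                 'service': 'HBASE',
                 'component': 'HBASE_MASTER'}]

with open('/var/nagios/ignore.dat', 'w') as f:
  for entry in passive_info:
    f.write('%(host)s %(service)s %(component)s\n' % entry)
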
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server_config.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server_config.py
deleted file mode 100644
index b0eabdc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server_config.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def nagios_server_config():
-  import params
-  
-  nagios_server_configfile( 'nagios.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'resource.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'hadoop-hosts.cfg')
-  nagios_server_configfile( 'hadoop-hostgroups.cfg')
-  nagios_server_configfile( 'hadoop-servicegroups.cfg')
-  nagios_server_configfile( 'hadoop-services.cfg')
-  nagios_server_configfile( 'hadoop-commands.cfg')
-  nagios_server_configfile( 'contacts.cfg')
-  
-  if System.get_instance().os_family != "suse":
-    nagios_server_configfile( 'nagios',
-                              config_dir = '/etc/init.d',
-                              mode = 0755, 
-                              owner = 'root', 
-                              group = 'root'
-    )
-
-  nagios_server_check( 'check_cpu.pl')
-  nagios_server_check( 'check_cpu.php')
-  nagios_server_check( 'check_cpu_ha.php')
-  nagios_server_check( 'check_datanode_storage.php')
-  nagios_server_check( 'check_aggregate.php')
-  nagios_server_check( 'check_hdfs_blocks.php')
-  nagios_server_check( 'check_hdfs_capacity.php')
-  nagios_server_check( 'check_rpcq_latency.php')
-  nagios_server_check( 'check_webui.sh')
-  nagios_server_check( 'check_name_dir_status.php')
-  nagios_server_check( 'check_oozie_status.sh')
-  nagios_server_check( 'check_templeton_status.sh')
-  nagios_server_check( 'check_hive_metastore_status.sh')
-  nagios_server_check( 'check_hue_status.sh')
-  nagios_server_check( 'check_mapred_local_dir_used.sh')
-  nagios_server_check( 'check_nodemanager_health.sh')
-  nagios_server_check( 'check_namenodes_ha.sh')
-  nagios_server_check( 'hdp_nagios_init.php')
-  nagios_server_check( 'mm_wrapper.py' )
-  nagios_server_check( 'check_hive_thrift_port.py' )
-
-
-def nagios_server_configfile(
-  name,
-  owner = None,
-  group = None,
-  config_dir = None,
-  mode = None
-):
-  import params
-  owner = params.nagios_user if not owner else owner
-  group = params.user_group if not group else group
-  config_dir = params.nagios_obj_dir if not config_dir else config_dir
-  
-  TemplateConfig( format("{config_dir}/{name}"),
-    owner          = owner,
-    group          = group,
-    mode           = mode
-  )
-
-def nagios_server_check(name):
-  File( format("{plugins_dir}/{name}"),
-    content = StaticFile(name), 
-    mode = 0755
-  )

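nagios_server_check() above amounts to copying a bundled plugin into the Nagios plugins directory and marking it executable. A hedged stand-alone equivalent ('/usr/lib64/nagios/plugins' is an assumption; the real destination comes from params.plugins_dir):

import os
import shutil

def install_check(name, files_dir, plugins_dir='/usr/lib64/nagios/plugins'):
  dst = os.path.join(plugins_dir, name)
  shutil.copy(os.path.join(files_dir, name), dst)
  os.chmod(dst, 0755)  # rwxr-xr-x, matching the File(...) mode above
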
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_service.py
deleted file mode 100644
index 1bcb14e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_service.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import os
-import signal
-
-from resource_management import *
-from os.path import isfile
-
-
-def nagios_service(action='start'): # start or stop
-  import params
-  
-  nagios_pid_file = format("{nagios_pid_file}")
-
-  command_path = "/usr/local/bin/:/bin/:/sbin/"
-
-  if action == 'start': 
-    Execute("service nagios start", path = command_path )   
-  elif action == 'stop':
-    # attempt to grab the pid in case we need it later
-    nagios_pid = 0  
-    if isfile(nagios_pid_file):   
-      with open(nagios_pid_file, "r") as file:
-        try:
-          nagios_pid = int(file.read())
-          Logger.info("Nagios is running with a PID of {0}".format(nagios_pid))
-        except:
-          Logger.info("Unable to read PID file {0}".format(nagios_pid_file))
-        finally:
-          file.close()
-  
-    Execute("service nagios stop", path = command_path)
-
-    # on SUSE, there is a bug where Nagios doesn't kill the process 
-    # but this could also affect any OS, so don't restrict this to SUSE
-    if nagios_pid > 0:
-      try:
-        os.kill(nagios_pid, 0)
-      except:
-        Logger.info("The Nagios process has successfully terminated")
-      else:
-        Logger.info("The Nagios process with ID {0} failed to terminate; explicitly killing.".format(nagios_pid))
-        os.kill(nagios_pid, signal.SIGKILL)
-
-    # in the event that the Nagios scripts don't remove the pid file
-    if isfile( nagios_pid_file ):   
-      Execute(format("rm -f {nagios_pid_file}"))
-        
-  MonitorWebserver("restart")
\ No newline at end of file

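The stop path above leans on the classic signal-0 probe: os.kill(pid, 0) sends nothing but raises if the process no longer exists. A small hedged helper showing the same liveness test in isolation:

import errno
import os

def process_alive(pid):
  try:
    os.kill(pid, 0)  # signal 0: existence/permission check only
  except OSError, e:
    # EPERM means the process exists but belongs to another user
    return e.errno == errno.EPERM
  return True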

[14/17] ambari git commit: AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/nagios.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/nagios.cfg.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/nagios.cfg.j2
deleted file mode 100644
index d51471b..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/nagios.cfg.j2
+++ /dev/null
@@ -1,1365 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-##############################################################################
-#
-# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
-#
-# Read the documentation for more information on this configuration
-# file.  I've provided some comments here, but things may not be so
-# clear without further explanation.
-#
-# Last Modified: 12-14-2008
-#
-##############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-# LOG FILE
-# This is the main log file where service and host events are logged
-# for historical purposes.  This should be the first option specified 
-# in the config file!!!
-
-log_file=/var/log/nagios/nagios.log
-
-
-# OBJECT CONFIGURATION FILE(S)
-# These are the object configuration files in which you define hosts,
-# host groups, contacts, contact groups, services, etc.
-# You can split your object definitions across several config files
-# if you wish (as shown below), or keep them all in a single config file.
-
-{% for cfg_file in cfg_files %}
-cfg_file={{cfg_file}}
-{% endfor %}
-
-# Definitions for monitoring the local (Linux) host
-#cfg_file={{conf_dir}}/objects/localhost.cfg
-
-# Definitions for monitoring a Windows machine
-#cfg_file={{conf_dir}}/objects/windows.cfg
-
-# Definitions for monitoring a router/switch
-#cfg_file={{conf_dir}}/objects/switch.cfg
-
-# Definitions for monitoring a network printer
-#cfg_file={{conf_dir}}/objects/printer.cfg
-
-# Definitions for hadoop servers
-cfg_file={{nagios_host_cfg}}
-cfg_file={{nagios_hostgroup_cfg}}
-cfg_file={{nagios_servicegroup_cfg}}
-cfg_file={{nagios_service_cfg}}
-cfg_file={{nagios_command_cfg}}
-
-
-# You can also tell Nagios to process all config files (with a .cfg
-# extension) in a particular directory by using the cfg_dir
-# directive as shown below:
-
-#cfg_dir={{conf_dir}}/servers
-#cfg_dir={{conf_dir}}/printers
-#cfg_dir={{conf_dir}}/switches
-#cfg_dir={{conf_dir}}/routers
-
-
-
-
-# OBJECT CACHE FILE
-# This option determines where object definitions are cached when
-# Nagios starts/restarts.  The CGIs read object definitions from 
-# this cache file (rather than looking at the object config files
-# directly) in order to prevent inconsistencies that can occur
-# when the config files are modified after Nagios starts.
-
-object_cache_file=/var/nagios/objects.cache
-
-
-
-# PRE-CACHED OBJECT FILE
-# This option determines the location of the precached object file.
-# If you run Nagios with the -p command line option, it will preprocess
-# your object configuration file(s) and write the cached config to this
-# file.  You can then start Nagios with the -u option to have it read
-# object definitions from this precached file, rather than the standard
-# object configuration files (see the cfg_file and cfg_dir options above).
-# Using a precached object file can speed up the time needed to (re)start 
-# the Nagios process if you've got a large and/or complex configuration.
-# Read the documentation section on optimizing Nagios to find out more
-# about how this feature works.
-
-precached_object_file=/var/nagios/objects.precache
-
-
-
-# RESOURCE FILE
-# This is an optional resource file that contains $USERx$ macro
-# definitions. Multiple resource files can be specified by using
-# multiple resource_file definitions.  The CGIs will not attempt to
-# read the contents of resource files, so information that is
-# considered to be sensitive (usernames, passwords, etc) can be
-# defined as macros in this file and restrictive permissions (600)
-# can be placed on this file.
-
-resource_file={{nagios_resource_cfg}}
-
-
-
-# STATUS FILE
-# This is where the current status of all monitored services and
-# hosts is stored.  Its contents are read and processed by the CGIs.
-# The contents of the status file are deleted every time Nagios
-#  restarts.
-
-status_file=/var/nagios/status.dat
-
-
-
-# STATUS FILE UPDATE INTERVAL
-# This option determines the frequency (in seconds) that
-# Nagios will periodically dump program, host, and 
-# service status data.
-
-status_update_interval=10
-
-
-
-# NAGIOS USER
-# This determines the effective user that Nagios should run as.  
-# You can either supply a username or a UID.
-
-nagios_user={{nagios_user}}
-
-
-
-# NAGIOS GROUP
-# This determines the effective group that Nagios should run as.  
-# You can either supply a group name or a GID.
-
-nagios_group={{nagios_group}}
-
-
-
-# EXTERNAL COMMAND OPTION
-# This option allows you to specify whether or not Nagios should check
-# for external commands (in the command file defined below).  By default
-# Nagios will *not* check for external commands, just to be on the
-# cautious side.  If you want to be able to use the CGI command interface
-# you will have to enable this.
-# Values: 0 = disable commands, 1 = enable commands
-
-check_external_commands=1
-
-
-
-# EXTERNAL COMMAND CHECK INTERVAL
-# This is the interval at which Nagios should check for external commands.
-# This value works off of the interval_length you specify later.  If you leave
-# that at its default value of 60 (seconds), a value of 1 here will cause
-# Nagios to check for external commands every minute.  If you specify a
-# number followed by an "s" (i.e. 15s), this will be interpreted to mean
-# actual seconds rather than a multiple of the interval_length variable.
-# Note: In addition to reading the external command file at regularly 
-# scheduled intervals, Nagios will also check for external commands after
-# event handlers are executed.
-# NOTE: Setting this value to -1 causes Nagios to check the external
-# command file as often as possible.
-
-#command_check_interval=15s
-command_check_interval=-1
-
-
-
-# EXTERNAL COMMAND FILE
-# This is the file that Nagios checks for external command requests.
-# It is also where the command CGI will write commands that are submitted
-# by users, so it must be writeable by the user that the web server
-# is running as (usually 'nobody').  Permissions should be set at the 
-# directory level instead of on the file, as the file is deleted every
-# time its contents are processed.
-
-command_file=/var/nagios/rw/nagios.cmd
-
-
-
-# EXTERNAL COMMAND BUFFER SLOTS
-# This setting is used to tweak the number of items or "slots" that
-# the Nagios daemon should allocate to the buffer that holds incoming 
-# external commands before they are processed.  As external commands 
-# are processed by the daemon, they are removed from the buffer.  
-
-external_command_buffer_slots=4096
-
-
-
-# LOCK FILE
-# This is the lockfile that Nagios will use to store its PID number
-# in when it is running in daemon mode.
-
-lock_file={{nagios_pid_file}}
-
-
-
-# TEMP FILE
-# This is a temporary file that is used as scratch space when Nagios
-# updates the status log, cleans the comment file, etc.  This file
-# is created, used, and deleted throughout the time that Nagios is
-# running.
-
-temp_file=/var/nagios/nagios.tmp
-
-
-
-# TEMP PATH
-# This is path where Nagios can create temp files for service and
-# host check results, etc.
-
-temp_path=/tmp
-
-
-
-# EVENT BROKER OPTIONS
-# Controls what (if any) data gets sent to the event broker.
-# Values:  0      = Broker nothing
-#         -1      = Broker everything
-#         <other> = See documentation
-
-event_broker_options=-1
-
-
-
-# EVENT BROKER MODULE(S)
-# This directive is used to specify an event broker module that should
-# be loaded by Nagios at startup.  Use multiple directives if you want
-# to load more than one module.  Arguments that should be passed to
-# the module at startup are separated from the module path by a space.
-#
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-#
-# Do NOT overwrite modules while they are being used by Nagios or Nagios
-# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
-# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
-#
-# The correct/safe way of updating a module is by using one of these methods:
-#    1. Shutdown Nagios, replace the module file, restart Nagios
-#    2. Delete the original module file, move the new module file into place, restart Nagios
-#
-# Example:
-#
-#   broker_module=<modulepath> [moduleargs]
-
-#broker_module=/somewhere/module1.o
-#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
-
-
-
-# LOG ROTATION METHOD
-# This is the log rotation method that Nagios should use to rotate
-# the main log file. Values are as follows..
-#	n	= None - don't rotate the log
-#	h	= Hourly rotation (top of the hour)
-#	d	= Daily rotation (midnight every day)
-#	w	= Weekly rotation (midnight on Saturday evening)
-#	m	= Monthly rotation (midnight last day of month)
-
-log_rotation_method=d
-
-
-
-# LOG ARCHIVE PATH
-# This is the directory where archived (rotated) log files should be 
-# placed (assuming you've chosen to do log rotation).
-
-log_archive_path=/var/log/nagios/archives
-
-
-
-# LOGGING OPTIONS
-# If you want messages logged to the syslog facility, as well as the
-# Nagios log file set this option to 1.  If not, set it to 0.
-
-use_syslog=1
-
-
-
-# NOTIFICATION LOGGING OPTION
-# If you don't want notifications to be logged, set this value to 0.
-# If notifications should be logged, set the value to 1.
-
-log_notifications=1
-
-
-
-# SERVICE RETRY LOGGING OPTION
-# If you don't want service check retries to be logged, set this value
-# to 0.  If retries should be logged, set the value to 1.
-
-log_service_retries=1
-
-
-
-# HOST RETRY LOGGING OPTION
-# If you don't want host check retries to be logged, set this value to
-# 0.  If retries should be logged, set the value to 1.
-
-log_host_retries=1
-
-
-
-# EVENT HANDLER LOGGING OPTION
-# If you don't want host and service event handlers to be logged, set
-# this value to 0.  If event handlers should be logged, set the value
-# to 1.
-
-log_event_handlers=1
-
-
-
-# INITIAL STATES LOGGING OPTION
-# If you want Nagios to log all initial host and service states to
-# the main log file (the first time the service or host is checked)
-# you can enable this option by setting this value to 1.  If you
-# are not using an external application that does long term state
-# statistics reporting, you do not need to enable this option.  In
-# this case, set the value to 0.
-
-log_initial_states=0
-
-
-
-# EXTERNAL COMMANDS LOGGING OPTION
-# If you don't want Nagios to log external commands, set this value
-# to 0.  If external commands should be logged, set this value to 1.
-# Note: This option does not include logging of passive service
-# checks - see the option below for controlling whether or not
-# passive checks are logged.
-
-log_external_commands=1
-
-
-
-# PASSIVE CHECKS LOGGING OPTION
-# If you don't want Nagios to log passive host and service checks, set
-# this value to 0.  If passive checks should be logged, set
-# this value to 1.
-
-log_passive_checks=1
-
-
-
-# GLOBAL HOST AND SERVICE EVENT HANDLERS
-# These options allow you to specify a host and service event handler
-# command that is to be run for every host or service state change.
-# The global event handler is executed immediately prior to the event
-# handler that you have optionally specified in each host or
-# service definition. The command argument is the short name of a
-# command definition that you define in your host configuration file.
-# Read the HTML docs for more information.
-
-#global_host_event_handler=somecommand
-#global_service_event_handler=somecommand
-
-
-
-# SERVICE INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" service checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all service checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!  This is not a
-# good thing for production, but is useful when testing the
-# parallelization functionality.
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-service_inter_check_delay_method=s
-
-
-
-# MAXIMUM SERVICE CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all services should
-# be completed.  Default is 30 minutes.
-
-max_service_check_spread=30
-
-
-
-# SERVICE CHECK INTERLEAVE FACTOR
-# This variable determines how service checks are interleaved.
-# Interleaving the service checks allows for a more even
-# distribution of service checks and reduced load on remote
-# hosts.  Setting this value to 1 is equivalent to how versions
-# of Nagios prior to 0.0.5 performed service checks.  Set this
-# value to s (smart) for automatic calculation of the interleave
-# factor unless you have a specific reason to change it.
-#       s       = Use "smart" interleave factor calculation
-#       x       = Use an interleave factor of x, where x is a
-#                 number greater than or equal to 1.
-
-service_interleave_factor=s
-
-
-
-# HOST INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" host checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all host checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-host_inter_check_delay_method=s
-
-
-
-# MAXIMUM HOST CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all hosts should
-# be completed.  Default is 30 minutes.
-
-max_host_check_spread=30
-
-
-
-# MAXIMUM CONCURRENT SERVICE CHECKS
-# This option allows you to specify the maximum number of 
-# service checks that can be run in parallel at any given time.
-# Specifying a value of 1 for this variable essentially prevents
-# any service checks from being parallelized.  A value of 0
-# will not restrict the number of concurrent checks that are
-# being executed.
-
-max_concurrent_checks=0
-
-
-
-# HOST AND SERVICE CHECK REAPER FREQUENCY
-# This is the frequency (in seconds!) that Nagios will process
-# the results of host and service checks.
-
-check_result_reaper_frequency=10
-
-
-
-
-# MAX CHECK RESULT REAPER TIME
-# This is the max amount of time (in seconds) that a single
-# check result reaper event will be allowed to run before 
-# returning control back to Nagios so it can perform other
-# duties.
-
-max_check_result_reaper_time=30
-
-
-
-
-# CHECK RESULT PATH
-# This is the directory where Nagios stores the results of host and
-# service checks that have not yet been processed.
-#
-# Note: Make sure that only one instance of Nagios has access
-# to this directory!  
-
-check_result_path=/var/nagios/spool/checkresults
-
-
-
-
-# MAX CHECK RESULT FILE AGE
-# This option determines the maximum age (in seconds) for which check
-# result files are considered to be valid.  Files older than this 
-# threshold will be mercilessly deleted without further processing.
-
-max_check_result_file_age=3600
-
-
-
-
-# CACHED HOST CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous host check is considered current.
-# Cached host states (from host checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to the host check logic.
-# Too high of a value for this option may result in inaccurate host
-# states being used by Nagios, while a lower value may result in a
-# performance hit for host checks.  Use a value of 0 to disable host
-# check caching.
-
-cached_host_check_horizon=15
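-# For example, with the value of 15 above, a host check result that is
-# no more than 15 seconds old may be reused by the host check logic
-# instead of triggering a fresh active check.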
-
-
-
-# CACHED SERVICE CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous service check is considered current.
-# Cached service states (from service checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to predictive dependency checks.
-# Use a value of 0 to disable service check caching.
-
-cached_service_check_horizon=15
-
-
-
-# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of hosts when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# host dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_host_dependency_checks=1
-
-
-
-# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of services when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# service dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_service_dependency_checks=1
-
-
-
-# SOFT STATE DEPENDENCIES
-# This option determines whether or not Nagios will use soft state 
-# information when checking host and service dependencies. Normally 
-# Nagios will only use the latest hard host or service state when 
-# checking dependencies. If you want it to use the latest state (regardless
-# of whether it's a soft or hard state type), enable this option.
-# Values:
-#  0 = Don't use soft state dependencies (default) 
-#  1 = Use soft state dependencies 
-
-soft_state_dependencies=0
-
-
-
-# TIME CHANGE ADJUSTMENT THRESHOLDS
-# These options determine when Nagios will react to detected changes
-# in system time (either forward or backwards).
-
-#time_change_threshold=900
-
-
-
-# AUTO-RESCHEDULING OPTION
-# This option determines whether or not Nagios will attempt to
-# automatically reschedule active host and service checks to
-# "smooth" them out over time.  This can help balance the load on
-# the monitoring server.  
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_reschedule_checks=0
-
-
-
-# AUTO-RESCHEDULING INTERVAL
-# This option determines how often (in seconds) Nagios will
-# attempt to automatically reschedule checks.  This option only
-# has an effect if the auto_reschedule_checks option is enabled.
-# Default is 30 seconds.
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_interval=30
-
-
-
-# AUTO-RESCHEDULING WINDOW
-# This option determines the "window" of time (in seconds) that
-# Nagios will look at when automatically rescheduling checks.
-# Only host and service checks that occur in the next X seconds
-# (determined by this variable) will be rescheduled. This option
-# only has an effect if the auto_reschedule_checks option is
-# enabled.  Default is 180 seconds (3 minutes).
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_window=180
-
-
-
-# SLEEP TIME
-# This is the number of seconds to sleep between checking for system
-# events and service checks that need to be run.
-
-sleep_time=0.25
-
-
-
-# TIMEOUT VALUES
-# These options control how much time Nagios will allow various
-# types of commands to execute before killing them off.  Options
-# are available for controlling maximum time allotted for
-# service checks, host checks, event handlers, notifications, the
-# ocsp command, and performance data commands.  All values are in
-# seconds.
-
-service_check_timeout=60
-host_check_timeout=30
-event_handler_timeout=30
-notification_timeout=30
-ocsp_timeout=5
-perfdata_timeout=5
-
-
-
-# RETAIN STATE INFORMATION
-# This setting determines whether or not Nagios will save state
-# information for services and hosts before it shuts down.  Upon
-# startup Nagios will reload all saved service and host state
-# information before starting to monitor.  This is useful for 
-# maintaining long-term data on state statistics, etc, but will
-# slow Nagios down a bit when it (re)starts.  Since it's only
-# a one-time penalty, I think it's well worth the additional
-# startup delay.
-
-retain_state_information=1
-
-
-
-# STATE RETENTION FILE
-# This is the file that Nagios should use to store host and
-# service state information before it shuts down.  The state 
-# information in this file is also read immediately prior to
-# starting to monitor the network when Nagios is restarted.
-# This file is used only if the retain_state_information
-# variable is set to 1.
-
-state_retention_file=/var/nagios/retention.dat
-
-
-
-# RETENTION DATA UPDATE INTERVAL
-# This setting determines how often (in minutes) that Nagios
-# will automatically save retention data during normal operation.
-# If you set this value to 0, Nagios will not save retention
-# data at regular intervals, but it will still save retention
-# data before shutting down or restarting.  If you have disabled
-# state retention, this option has no effect.
-
-retention_update_interval=60
-
-
-
-# USE RETAINED PROGRAM STATE
-# This setting determines whether or not Nagios will set 
-# program status variables based on the values saved in the
-# retention file.  If you want to use retained program status
-# information, set this value to 1.  If not, set this value
-# to 0.
-
-use_retained_program_state=1
-
-
-
-# USE RETAINED SCHEDULING INFO
-# This setting determines whether or not Nagios will retain
-# the scheduling info (next check time) for hosts and services
-# based on the values saved in the retention file.  If you
-# want to use retained scheduling info, set this
-# value to 1.  If not, set this value to 0.
-
-use_retained_scheduling_info=1
-
-
-
-# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
-# The following variables are used to specify specific host and
-# service attributes that should *not* be retained by Nagios during
-# program restarts.
-#
-# The values of the masks are bitwise ORs (sums) of values specified
-# by the "MODATTR_" definitions found in include/common.h.  
-# For example, if you do not want the current enabled/disabled state
-# of flap detection and event handlers for hosts to be retained, you
-# would use a value of 24 for the host attribute mask...
-# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
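-# As a sketch, the mask computed above would be applied as
-# (hypothetical value, shown for illustration only):
-#
-#   retained_host_attribute_mask=24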
-
-# This mask determines what host attributes are not retained
-retained_host_attribute_mask=0
-
-# This mask determines what service attributes are not retained
-retained_service_attribute_mask=0
-
-# These two masks determine what process attributes are not retained.
-# There are two masks, because some process attributes have host and service
-# options.  For example, you can disable active host checks, but leave active
-# service checks enabled.
-retained_process_host_attribute_mask=0
-retained_process_service_attribute_mask=0
-
-# These two masks determine what contact attributes are not retained.
-# There are two masks, because some contact attributes have host and
-# service options.  For example, you can disable host notifications for
-# a contact, but leave service notifications enabled for them.
-retained_contact_host_attribute_mask=0
-retained_contact_service_attribute_mask=0
-
-
-
-# INTERVAL LENGTH
-# This is the seconds per unit interval as used in the
-# host/contact/service configuration files.  Setting this to 60 means
-# that each interval is one minute long (60 seconds).  Other settings
-# have not been tested much, so your mileage is likely to vary...
-
-interval_length=60
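-# For example, with interval_length=60, a hypothetical service defined
-# with check_interval=5 would be checked every 5 * 60 = 300 seconds.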
-
-
-
-# CHECK FOR UPDATES
-# This option determines whether Nagios will automatically check to
-# see if new updates (releases) are available.  It is recommended that you
-# enable this option to ensure that you stay on top of the latest critical
-# patches to Nagios.  Nagios is critical to you - make sure you keep it in
-# good shape.  Nagios will check once a day for new updates. Data collected
-# by Nagios Enterprises from the update check is processed in accordance 
-# with our privacy policy - see http://api.nagios.org for details.
-
-check_for_updates=1
-
-
-
-# BARE UPDATE CHECK
-# This option determines what data Nagios will send to api.nagios.org when
-# it checks for updates.  By default, Nagios will send information on the 
-# current version of Nagios you have installed, as well as an indicator as
-# to whether this was a new installation or not.  Nagios Enterprises uses
-# this data to determine the number of users running specific versions of
-# Nagios.  Enable this option if you do not want this information to be sent.
-
-bare_update_check=0
-
-
-
-# AGGRESSIVE HOST CHECKING OPTION
-# If you don't want to turn on aggressive host checking features, set
-# this value to 0 (the default).  Otherwise set this value to 1 to
-# enable the aggressive check option.  Read the docs for more info
-# on what aggressive host check is or check out the source code in
-# base/checks.c
-
-use_aggressive_host_checking=0
-
-
-
-# SERVICE CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# service checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of service checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_service_checks=1
-
-
-
-# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# service checks results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_service_checks=1
-
-
-
-# HOST CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# host checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of host checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_host_checks=1
-
-
-
-# PASSIVE HOST CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# host checks results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_host_checks=1
-
-
-
-# NOTIFICATIONS OPTION
-# This determines whether or not Nagios will send out any host or
-# service notifications when it is initially (re)started.
-# Values: 1 = enable notifications, 0 = disable notifications
-
-enable_notifications=1
-
-
-
-# EVENT HANDLER USE OPTION
-# This determines whether or not Nagios will run any host or
-# service event handlers when it is initially (re)started.  Unless
-# you're implementing redundant hosts, leave this option enabled.
-# Values: 1 = enable event handlers, 0 = disable event handlers
-
-enable_event_handlers=1
-
-
-
-# PROCESS PERFORMANCE DATA OPTION
-# This determines whether or not Nagios will process performance
-# data returned from service and host checks.  If this option is
-# enabled, host performance data will be processed using the
-# host_perfdata_command (defined below) and service performance
-# data will be processed using the service_perfdata_command (also
-# defined below).  Read the HTML docs for more information on
-# performance data.
-# Values: 1 = process performance data, 0 = do not process performance data
-
-process_performance_data=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
-# These commands are run after every host and service check is
-# performed.  These commands are executed only if the
-# enable_performance_data option (above) is set to 1.  The command
-# argument is the short name of a command definition that you 
-# define in your host configuration file.  Read the HTML docs for
-# more information on performance data.
-
-#host_perfdata_command=process-host-perfdata
-#service_perfdata_command=process-service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILES
-# These files are used to store host and service performance data.
-# Performance data is only written to these files if the
-# enable_performance_data option (above) is set to 1.
-
-#host_perfdata_file=/tmp/host-perfdata
-#service_perfdata_file=/tmp/service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
-# These options determine what data is written (and how) to the
-# performance data files.  The templates may contain macros, special
-# characters (\t for tab, \r for carriage return, \n for newline)
-# and plain text.  A newline is automatically added after each write
-# to the performance data file.  Some examples of what you can do are
-# shown below.
-
-#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
-#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE MODES
-# This option determines whether or not the host and service
-# performance data files are opened in write ("w") or append ("a")
-# mode. If you want to use named pipes, you should use the special
-# pipe ("p") mode which avoid blocking at startup, otherwise you will
-# likely want the defult append ("a") mode.
-
-#host_perfdata_file_mode=a
-#service_perfdata_file_mode=a
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
-# These options determine how often (in seconds) the host and service
-# performance data files are processed using the commands defined
-# below.  A value of 0 indicates the files should not be periodically
-# processed.
-
-#host_perfdata_file_processing_interval=0
-#service_perfdata_file_processing_interval=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
-# These commands are used to periodically process the host and
-# service performance data files.  The interval at which the
-# processing occurs is determined by the options above.
-
-#host_perfdata_file_processing_command=process-host-perfdata-file
-#service_perfdata_file_processing_command=process-service-perfdata-file
-
-
-
-# OBSESS OVER SERVICE CHECKS OPTION
-# This determines whether or not Nagios will obsess over service
-# checks and run the ocsp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over services, 0 = do not obsess (default)
-
-obsess_over_services=0
-
-
-
-# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
-# This is the command that is run for every service check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_services option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ocsp_command=somecommand
-
-
-
-# OBSESS OVER HOST CHECKS OPTION
-# This determines whether or not Nagios will obsess over host
-# checks and run the ochp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over hosts, 0 = do not obsess (default)
-
-obsess_over_hosts=0
-
-
-
-# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
-# This is the command that is run for every host check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_hosts option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ochp_command=somecommand
-
-
-
-# TRANSLATE PASSIVE HOST CHECKS OPTION
-# This determines whether or not Nagios will translate
-# DOWN/UNREACHABLE passive host check results into their proper
-# state for this instance of Nagios.  This option is useful
-# if you have a distributed or failover monitoring setup.  In
-# these cases your other Nagios servers probably have a different
-# "view" of the network, with regards to the parent/child relationship
-# of hosts.  If a distributed monitoring server thinks a host
-# is DOWN, it may actually be UNREACHABLE from the point of view of
-# this Nagios instance.  Enabling this option will tell Nagios
-# to translate any DOWN or UNREACHABLE host states it receives
-# passively into the correct state from the view of this server.
-# Values: 1 = perform translation, 0 = do not translate (default)
-
-translate_passive_host_checks=0
-
-
-
-# PASSIVE HOST CHECKS ARE SOFT OPTION
-# This determines whether or not Nagios will treat passive host
-# checks as being HARD or SOFT.  By default, a passive host check
-# result will put a host into a HARD state type.  This can be changed
-# by enabling this option.
-# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
-
-passive_host_checks_are_soft=0
-
-
-
-# ORPHANED HOST/SERVICE CHECK OPTIONS
-# These options determine whether or not Nagios will periodically 
-# check for orphaned host and service checks.  Since service checks are
-# not rescheduled until the results of their previous execution 
-# instance are processed, there exists a possibility that some
-# checks may never get rescheduled.  A similar situation exists for
-# host checks, although the exact scheduling details differ a bit
-# from service checks.  Orphaned checks seem to be a rare
-# problem and should not happen under normal circumstances.
-# If you have problems with service checks never getting
-# rescheduled, make sure you have orphaned service checks enabled.
-# Values: 1 = enable checks, 0 = disable checks
-
-check_for_orphaned_services=1
-check_for_orphaned_hosts=1
-
-
-
-# SERVICE FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of service results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_service_freshness=0
-
-
-
-# SERVICE FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of service check results.  If you have
-# disabled service freshness checking, this option has no effect.
-
-service_freshness_check_interval=60
-
-
-
-# HOST FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of host results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_host_freshness=0
-
-
-
-# HOST FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of host check results.  If you have
-# disabled host freshness checking, this option has no effect.
-
-host_freshness_check_interval=60
-
-
-
-
-# ADDITIONAL FRESHNESS THRESHOLD LATENCY
-# This setting determines the number of seconds that Nagios
-# will add to any host and service freshness thresholds that
-# it calculates (those not explicitly specified by the user).
-
-additional_freshness_latency=15
-
-
-
-
-# FLAP DETECTION OPTION
-# This option determines whether or not Nagios will try
-# to detect hosts and services that are "flapping".
-# Flapping occurs when a host or service changes between
-# states too frequently.  When Nagios detects that a 
-# host or service is flapping, it will temporarily suppress
-# notifications for that host/service until it stops
-# flapping.  Flap detection is very experimental, so read
-# the HTML documentation before enabling this feature!
-# Values: 1 = enable flap detection
-#         0 = disable flap detection (default)
-
-enable_flap_detection=1
-
-
-
-# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
-# Read the HTML documentation on flap detection for
-# an explanation of what this option does.  This option
-# has no effect if flap detection is disabled.
-
-low_service_flap_threshold=5.0
-high_service_flap_threshold=20.0
-low_host_flap_threshold=5.0
-high_host_flap_threshold=20.0
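-# For example, with the service thresholds above, a service begins
-# "flapping" once its weighted percentage of state change exceeds 20.0%,
-# and stops flapping once that percentage falls back below 5.0%.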
-
-
-
-# DATE FORMAT OPTION
-# This option determines how short dates are displayed. Valid options
-# include:
-#	us		(MM-DD-YYYY HH:MM:SS)
-#	euro    	(DD-MM-YYYY HH:MM:SS)
-#	iso8601		(YYYY-MM-DD HH:MM:SS)
-#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
-#
-
-date_format=us
-
-
-
-
-# TIMEZONE OFFSET
-# This option is used to override the default timezone that this
-# instance of Nagios runs in.  If not specified, Nagios will use
-# the system configured timezone.
-#
-# NOTE: In order to display the correct timezone in the CGIs, you
-# will also need to alter the Apache directives for the CGI path 
-# to include your timezone.  Example:
-#
-#   <Directory "/usr/local/nagios/sbin/">
-#      SetEnv TZ "Australia/Brisbane"
-#      ...
-#   </Directory>
-
-#use_timezone=US/Mountain
-#use_timezone=Australia/Brisbane
-
-
-
-
-# P1.PL FILE LOCATION
-# This value determines where the p1.pl perl script (used by the
-# embedded Perl interpreter) is located.  If you didn't compile
-# Nagios with embedded Perl support, this option has no effect.
-
-p1_file = {{nagios_p1_pl}}
-
-
-
-# EMBEDDED PERL INTERPRETER OPTION
-# This option determines whether or not the embedded Perl interpreter
-# will be enabled during runtime.  This option has no effect if Nagios
-# has not been compiled with support for embedded Perl.
-# Values: 0 = disable interpreter, 1 = enable interpreter
-
-enable_embedded_perl=1
-
-
-
-# EMBEDDED PERL USAGE OPTION
-# This option determines whether or not Nagios will process Perl plugins
-# and scripts with the embedded Perl interpreter if the plugins/scripts
-# do not explicitly indicate whether or not it is okay to do so. Read
-# the HTML documentation on the embedded Perl interpreter for more 
-# information on how this option works.
-
-use_embedded_perl_implicitly=1
-
-
-
-# ILLEGAL OBJECT NAME CHARACTERS
-# This option allows you to specify illegal characters that cannot
-# be used in host names, service descriptions, or names of other
-# object types.
-
-illegal_object_name_chars=`~!$%^&*|'"<>?,()=
-
-
-
-# ILLEGAL MACRO OUTPUT CHARACTERS
-# This option allows you to specify illegal characters that are
-# stripped from macros before being used in notifications, event
-# handlers, etc.  This DOES NOT affect macros used in service or
-# host check commands.
-# The following macros are stripped of the characters you specify:
-#	$HOSTOUTPUT$
-#	$HOSTPERFDATA$
-#	$HOSTACKAUTHOR$
-#	$HOSTACKCOMMENT$
-#	$SERVICEOUTPUT$
-#	$SERVICEPERFDATA$
-#	$SERVICEACKAUTHOR$
-#	$SERVICEACKCOMMENT$
-
-illegal_macro_output_chars=`~$&|'"<>
-
-
-
-# REGULAR EXPRESSION MATCHING
-# This option controls whether or not regular expression matching
-# takes place in the object config files.  Regular expression
-# matching is used to match host, hostgroup, service, and service
-# group names/descriptions in some fields of various object types.
-# Values: 1 = enable regexp matching, 0 = disable regexp matching
-
-use_regexp_matching=0
-
-
-
-# "TRUE" REGULAR EXPRESSION MATCHING
-# This option controls whether or not "true" regular expression 
-# matching takes place in the object config files.  This option
-# only has an effect if regular expression matching is enabled
-# (see above).  If this option is DISABLED, regular expression
-# matching only occurs if a string contains wildcard characters
-# (* and ?).  If the option is ENABLED, regexp matching occurs
-# all the time (which can be annoying).
-# Values: 1 = enable true matching, 0 = disable true matching
-
-use_true_regexp_matching=0
-
-
-
-# ADMINISTRATOR EMAIL/PAGER ADDRESSES
-# The email and pager address of a global administrator (likely you).
-# Nagios never uses these values itself, but you can access them by
-# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
-# commands.
-
-admin_email=nagios@localhost
-admin_pager=pagenagios@localhost
-
-
-
-# DAEMON CORE DUMP OPTION
-# This option determines whether or not Nagios is allowed to create
-# a core dump when it runs as a daemon.  Note that it is generally
-# considered bad form to allow this, but it may be useful for
-# debugging purposes.  Enabling this option doesn't guarantee that
-# a core file will be produced, but that's just life...
-# Values: 1 - Allow core dumps
-#         0 - Do not allow core dumps (default)
-
-daemon_dumps_core=0
-
-
-
-# LARGE INSTALLATION TWEAKS OPTION
-# This option determines whether or not Nagios will take some shortcuts
-# which can save on memory and CPU usage in large Nagios installations.
-# Read the documentation for more information on the benefits/tradeoffs
-# of enabling this option.
-# Values: 1 - Enable tweaks
-#         0 - Disable tweaks (default)
-
-use_large_installation_tweaks=1
-
-
-
-# ENABLE ENVIRONMENT MACROS
-# This option determines whether or not Nagios will make all standard
-# macros available as environment variables when host/service checks
-# and system commands (event handlers, notifications, etc.) are
-# executed.  Enabling this option can cause performance issues in 
-# large installations, as it will consume a bit more memory and (more
-# importantly) consume more CPU.
-# Values: 1 - Enable environment variable macros (default)
-#         0 - Disable environment variable macros
-
-# NAGIOS_* macros are required for Ambari Maintenance Mode (check_wrapper.sh)
-enable_environment_macros=1
-
-
-
-# CHILD PROCESS MEMORY OPTION
-# This option determines whether or not Nagios will free memory in
-# child processes (processes used to execute system commands and host/
-# service checks).  If you specify a value here, it will override
-# program defaults.
-# Value: 1 - Free memory in child processes
-#        0 - Do not free memory in child processes
-
-#free_child_process_memory=1
-
-
-
-# CHILD PROCESS FORKING BEHAVIOR
-# This option determines how Nagios will fork child processes
-# (used to execute system commands and host/service checks).  Normally
-# child processes are fork()ed twice, which provides a very high level
-# of isolation from problems.  Fork()ing once is probably enough and will
-# save a great deal on CPU usage (in large installs), so you might
-# want to consider using this.  If you specify a value here, it will
-# override program defaults.
-# Value: 1 - Child processes fork() twice
-#        0 - Child processes fork() just once
-
-#child_processes_fork_twice=1
-
-
-
-# DEBUG LEVEL
-# This option determines how much (if any) debugging information will
-# be written to the debug file.  OR values together to log multiple
-# types of information.
-# Values: 
-#          -1 = Everything
-#          0 = Nothing
-#	   1 = Functions
-#          2 = Configuration
-#          4 = Process information
-#	   8 = Scheduled events
-#          16 = Host/service checks
-#          32 = Notifications
-#          64 = Event broker
-#          128 = External commands
-#          256 = Commands
-#          512 = Scheduled downtime
-#          1024 = Comments
-#          2048 = Macros
-
-debug_level=0
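-# For example, to log host/service checks (16) together with
-# notifications (32), OR the two values together (hypothetical
-# setting, shown for illustration only):
-#
-#   debug_level=48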
-
-
-
-# DEBUG VERBOSITY
-# This option determines how verbose the debug log output will be.
-# Values: 0 = Brief output
-#         1 = More detailed
-#         2 = Very detailed
-
-debug_verbosity=1
-
-
-
-# DEBUG FILE
-# This option determines where Nagios should write debugging information.
-
-debug_file=/var/log/nagios/nagios.debug
-
-
-
-# MAX DEBUG FILE SIZE
-# This option determines the maximum size (in bytes) of the debug file.  If
-# the file grows larger than this size, it will be renamed with a .old
-# extension.  If a file already exists with a .old extension it will
-# automatically be deleted.  This helps ensure your disk space usage doesn't
-# get out of control when debugging Nagios.
-
-max_debug_file_size=1000000
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/nagios.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/nagios.conf.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/nagios.conf.j2
deleted file mode 100644
index f415e65..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/nagios.conf.j2
+++ /dev/null
@@ -1,84 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# SAMPLE CONFIG SNIPPETS FOR APACHE WEB SERVER
-# Last Modified: 11-26-2005
-#
-# This file contains examples of entries that need
-# to be incorporated into your Apache web server
-# configuration file.  Customize the paths, etc. as
-# needed to fit your system.
-#
-
-ScriptAlias {{cgi_weblink}} "{{cgi_dir}}"
-
-<Directory "{{cgi_dir}}">
-#  SSLRequireSSL
-   Options ExecCGI
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile {{conf_dir}}/htpasswd.users
-   Require valid-user
-</Directory>
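-# Illustrative sketch for populating the password file referenced above
-# (assumes Apache's htpasswd utility; "nagiosadmin" is a hypothetical user):
-#   htpasswd -c {{conf_dir}}/htpasswd.users nagiosadmin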
-
-Alias /nagios "{{nagios_web_dir}}"
-{# Ubuntu uses a different Nagios URL #}
-{% if os_family == "ubuntu" %}
-Alias /nagios3 "{{nagios_web_dir}}"
-{% endif %}
-
-<Directory "{{nagios_web_dir}}">
-#  SSLRequireSSL
-   Options FollowSymLinks
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile {{conf_dir}}/htpasswd.users
-   Require valid-user
-</Directory>
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/nagios.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/nagios.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/nagios.j2
deleted file mode 100644
index 0927915..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/nagios.j2
+++ /dev/null
@@ -1,164 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#!/bin/sh
-# $Id$
-# Nagios	Startup script for the Nagios monitoring daemon
-#
-# chkconfig:	- 85 15
-# description:	Nagios is a service monitoring system
-# processname: nagios
-# config: /etc/nagios/nagios.cfg
-# pidfile: /var/nagios/nagios.pid
-#
-### BEGIN INIT INFO
-# Provides:		nagios
-# Required-Start:	$local_fs $syslog $network
-# Required-Stop:	$local_fs $syslog $network
-# Short-Description:    start and stop Nagios monitoring server
-# Description:		Nagios is a service monitoring system
-### END INIT INFO
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Source function library.
-. /etc/rc.d/init.d/functions
-
-prefix="/usr"
-exec_prefix="/usr"
-exec="/usr/sbin/nagios"
-prog="nagios"
-config="{{conf_dir}}/nagios.cfg"
-pidfile="{{nagios_pid_file}}"
-user="{{nagios_user}}"
-
-[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
-
-lockfile=/var/lock/subsys/$prog
-
-start() {
-    [ -x $exec ] || exit 5
-    [ -f $config ] || exit 6
-    echo -n $"Starting $prog: "
-    daemon --user=$user $exec -d $config
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && touch $lockfile
-    return $retval
-}
-
-stop() {
-    echo -n $"Stopping $prog: "
-    killproc -d 10 $exec
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && rm -f $lockfile
-    return $retval
-}
-
-
-restart() {
-    stop
-    start
-}
-
-reload() {
-    echo -n $"Reloading $prog: "
-    killproc $exec -HUP
-    RETVAL=$?
-    echo
-}
-
-force_reload() {
-    restart
-}
-
-check_config() {
-        $nice runuser -s /bin/bash - $user -c "$corelimit >/dev/null 2>&1 ; $exec -v $config > /dev/null 2>&1"
-        RETVAL=$?
-        if [ $RETVAL -ne 0 ] ; then
-                echo -n $"Configuration validation failed"
-                failure
-                echo
-                exit 1
-
-        fi
-}
-
-
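-# Illustrative usage (hypothetical invocations, assuming the standard
-# service wrapper):
-#   service nagios configtest   # validate the config via check_config
-#   service nagios reload       # re-read the config after a successful check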
-case "$1" in
-    start)
-        status $prog && exit 0
-	check_config
-        $1
-        ;;
-    stop)
-        status $prog|| exit 0
-        $1
-        ;;
-    restart)
-	check_config
-        $1
-        ;;
-    reload)
-        status $prog || exit 7
-	check_config
-        $1
-        ;;
-    force-reload)
-	check_config
-        force_reload
-        ;;
-    status)
-        status $prog
-        ;;
-    condrestart|try-restart)
-        status $prog|| exit 0
-	check_config
-        restart
-        ;;
-    configtest)
-        echo -n  $"Checking config for $prog: "
-        check_config && success
-        echo
-	;;
-    *)
-        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
-        exit 2
-esac
-exit $?

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/resource.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/resource.cfg.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/resource.cfg.j2
deleted file mode 100644
index 291d90f..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/resource.cfg.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-{#
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-#}
-
-###########################################################################
-#
-# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
-#
-# Last Modified: 09-10-2003
-#
-# You can define $USERx$ macros in this file, which can in turn be used
-# in command definitions in your host config file(s).  $USERx$ macros are
-# useful for storing sensitive information such as usernames, passwords,
-# etc.  They are also handy for specifying the path to plugins and
-# event handlers - if you decide to move the plugins or event handlers to
-# a different directory in the future, you can just update one or two
-# $USERx$ macros, instead of modifying a lot of command definitions.
-#
-# The CGIs will not attempt to read the contents of resource files, so
-# you can set restrictive permissions (600 or 660) on them.
-#
-# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
-#
-# Resource files may also be used to store configuration directives for
-# external data sources like MySQL...
-#
-###########################################################################
-
-# Sets $USER1$ to be the path to the plugins
-$USER1$={{plugins_dir}}
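-# Illustrative use of $USER1$ in a (hypothetical) command definition:
-#   define command {
-#     command_name  check_local_disk
-#     command_line  $USER1$/check_disk -w 20% -c 10% -p /
-#   }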
-
-# Sets $USER2$ to be the path to event handlers
-#$USER2$={{eventhandlers_dir}}
-
-# Store some usernames and passwords (hidden from the CGIs)
-#$USER3$=someuser
-#$USER4$=somepassword
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/stack_advisor.py
index 6c6aeea..7ae4447 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/stack_advisor.py
@@ -282,7 +282,7 @@ class BaseBIGTOP08StackAdvisor(DefaultStackAdvisor):
     return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR']
 
   def getNotPreferableOnServerComponents(self):
-    return ['GANGLIA_SERVER', 'NAGIOS_SERVER']
+    return ['GANGLIA_SERVER']
 
   def getCardinalitiesDict(self):
     return {
@@ -344,7 +344,7 @@ class BIGTOP08StackAdvisor(BaseBIGTOP08StackAdvisor):
                    + "m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC")
 
   def getNotPreferableOnServerComponents(self):
-    return ['STORM_UI_SERVER', 'DRPC_SERVER', 'STORM_REST_API', 'NIMBUS', 'GANGLIA_SERVER', 'NAGIOS_SERVER']
+    return ['STORM_UI_SERVER', 'DRPC_SERVER', 'STORM_REST_API', 'NIMBUS', 'GANGLIA_SERVER']
 
   def getNotValuableComponents(self):
     return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR', 'APP_TIMELINE_SERVER']

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-ANY/scripts/params.py
index 94a4ced..b7e3a70 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-ANY/scripts/params.py
@@ -73,13 +73,11 @@ hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 user_group = config['configurations']['cluster-env']['user_group']
 
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
 namenode_host = default("/clusterHostInfo/namenode_host", [])
 hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
 
 has_namenode = not len(namenode_host) == 0
-has_nagios = not len(hagios_server_hosts) == 0
 has_ganglia_server = not len(ganglia_server_hosts) == 0
 has_tez = 'tez-site' in config['configurations']
 has_hbase_masters = not len(hbase_master_hosts) == 0
@@ -88,13 +86,11 @@ hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
 
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
-nagios_user = config['configurations']['nagios-env']['nagios_user']
 smoke_user =  config['configurations']['cluster-env']['smokeuser']
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 
 proxyuser_group =  default("/configurations/hadoop-env/proxyuser_group","users")
-nagios_group = config['configurations']['nagios-env']['nagios_group']
 
 ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
 
@@ -114,8 +110,6 @@ if has_tez:
   user_to_groups_dict[tez_user] = [proxyuser_group]
 
 user_to_gid_dict = collections.defaultdict(lambda:user_group)
-if has_nagios:
-  user_to_gid_dict[nagios_user] = nagios_group
 
 user_list = json.loads(config['hostLevelParams']['user_list'])
 group_list = json.loads(config['hostLevelParams']['group_list'])

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py
index 160fd60..a4d9578 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py
@@ -67,21 +67,18 @@ hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_p
 
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
-nagios_user = config['configurations']['nagios-env']['nagios_user']
 smoke_user =  config['configurations']['cluster-env']['smokeuser']
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 
 user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  default("/configurations/hadoop-env/proxyuser_group","users")
-nagios_group = config['configurations']['nagios-env']['nagios_group']
 
 #hosts
 hostname = config["hostname"]
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 rm_host = default("/clusterHostInfo/rm_host", [])
 slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
 hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
@@ -97,7 +94,6 @@ has_resourcemanager = not len(rm_host) == 0
 has_namenode = not len(namenode_host) == 0
 has_jt = not len(jtnode_host) == 0
 has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
 has_oozie_server = not len(oozie_servers)  == 0
 has_hcat_server_host = not len(hcat_server_hosts)  == 0
 has_hive_server_host = not len(hive_server_host)  == 0
@@ -131,8 +127,6 @@ if has_ganglia_server:
   user_to_groups_dict[gmetad_user] = [gmetad_user]
 
 user_to_gid_dict = collections.defaultdict(lambda:user_group)
-if has_nagios:
-  user_to_gid_dict[nagios_user] = nagios_group
 
 user_list = json.loads(config['hostLevelParams']['user_list'])
 group_list = json.loads(config['hostLevelParams']['group_list'])

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
index 0788c50..0b269c3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
@@ -37,7 +37,6 @@ current_service = config['serviceName']
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 rm_host = default("/clusterHostInfo/rm_host", [])
 slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
 hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
@@ -51,7 +50,6 @@ ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
 has_namenode = not len(namenode_host) == 0
 has_resourcemanager = not len(rm_host) == 0
 has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
 has_oozie_server = not len(oozie_servers)  == 0
 has_hcat_server_host = not len(hcat_server_hosts)  == 0
 has_hive_server_host = not len(hive_server_host)  == 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/1.3.2/role_command_order.json
index 67eeff3..5f819d2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
@@ -12,12 +10,6 @@
     "HIVE_METASTORE-START": ["MYSQL_SERVER-START"],
     "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
@@ -45,8 +37,7 @@
     "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
     "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
     "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
-    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
-    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
     "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
   },
   "_comment" : "GLUSTERFS-specific dependencies",
@@ -68,8 +59,6 @@
     "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -90,7 +79,6 @@
   "namenode_optional_ha": {
     "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
     "ZKFC-START": ["NAMENODE-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",
@@ -98,4 +86,3 @@
     "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
   }
 }
-
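
role_command_order.json maps a blocked ROLE-COMMAND to the ROLE-COMMAND entries that must finish first, which is why every NAGIOS_SERVER-* key could simply be dropped and the one reference to NAGIOS_SERVER-UPGRADE rerouted above. A toy illustration of how such a map orders commands (a simplified stand-in for Ambari's scheduler):

    deps = {
        "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
        "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
    }

    def blockers(command, deps):
        """Transitive set of commands that must complete before 'command'."""
        seen, stack = set(), list(deps.get(command, []))
        while stack:
            c = stack.pop()
            if c not in seen:
                seen.add(c)
                stack.extend(deps.get(c, []))
        return seen

    # blockers("HBASE_REGIONSERVER-START", deps) ->
    #   {"HBASE_MASTER-START", "ZOOKEEPER_SERVER-START"}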

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmondLib.sh
index 75a18a8..1e053b1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmondLib.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmondLib.sh
@@ -160,7 +160,6 @@ host {
  *
  * At the very least, every gmond must expose its XML state to 
  * queriers from localhost.
- * Also we use this port for Nagios monitoring
  */
 tcp_accept_channel {
   bind = 0.0.0.0
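
The removed comment refers to Nagios polling gmond over this same channel; the tcp_accept_channel serves the daemon's full XML state to whoever connects and then closes the socket. A minimal probe of that behavior (assumes gmond's conventional port 8649; adjust to your bound address and port):

    import socket

    def read_gmond_xml(host="localhost", port=8649, timeout=5.0):
        """gmond writes its XML cluster state and closes the connection."""
        chunks = []
        with socket.create_connection((host, port), timeout=timeout) as sock:
            while True:
                data = sock.recv(4096)
                if not data:
                    break
                chunks.append(data)
        return b"".join(chunks).decode("utf-8", errors="replace")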

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
index bae6fee..15c0ef0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
@@ -42,7 +42,6 @@ kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/
 hostname = config["hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
 slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
 hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
@@ -62,7 +61,6 @@ has_resourcemanager = not len(rm_host) == 0
 has_histroryserver = not len(hs_host) == 0
 has_hbase_masters = not len(hbase_master_hosts) == 0
 has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
 has_oozie_server = not len(oozie_servers)  == 0
 has_hcat_server_host = not len(hcat_server_hosts)  == 0
 has_hive_server_host = not len(hive_server_host)  == 0
@@ -82,7 +80,6 @@ if has_ganglia_server:
 
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
-nagios_user = config['configurations']['nagios-env']['nagios_user']
 oozie_user = config['configurations']['oozie-env']['oozie_user']
 webhcat_user = config['configurations']['hive-env']['hcat_user']
 hcat_user = config['configurations']['hive-env']['hcat_user']
@@ -93,7 +90,6 @@ hdfs_user = status_params.hdfs_user
 
 user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
-nagios_group = config['configurations']['nagios-env']['nagios_group']
 
 #hadoop params
 hadoop_conf_dir = "/etc/hadoop/conf"
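
Removing these lookups has to happen together with deleting the nagios-env config type below: config['configurations'] is a plain nested dict, so a leftover lookup would fail at hook time once the config type stops shipping with the stack. A sketch of the failure mode (illustrative config dict):

    config = {"configurations": {"hadoop-env": {"proxyuser_group": "users"}}}
    try:
        nagios_user = config['configurations']['nagios-env']['nagios_user']
    except KeyError as missing:
        print("config type no longer in the stack:", missing)  # -> 'nagios-env'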

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml
deleted file mode 100644
index fad8374..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>nagios_user</name>
-    <value>nagios</value>
-    <property-type>USER</property-type>
-    <description>Nagios Username.</description>
-  </property>
-  <property>
-    <name>nagios_group</name>
-    <value>nagios</value>
-    <property-type>GROUP</property-type>
-    <description>Nagios Group.</description>
-  </property>
-  <property>
-    <name>nagios_web_login</name>
-    <value>nagiosadmin</value>
-    <description>Nagios web user.</description>
-  </property>
-  <property require-input = "true">
-    <name>nagios_web_password</name>
-    <value></value>
-    <property-type>PASSWORD</property-type>
-    <description>Nagios Admin Password.</description>
-  </property>
-  <property require-input = "true">
-    <name>nagios_contact</name>
-    <value></value>
-    <description>Hadoop Admin Email.</description>
-  </property>
-
-</configuration>
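
Stack configuration files like the one deleted above are flat name/value property lists, so they parse with the standard library alone. A sketch (property names match the file above; the helper itself is illustrative):

    import xml.etree.ElementTree as ET

    def load_properties(path):
        """Map <name> to <value> for each <property> in a stack config file."""
        root = ET.parse(path).getroot()
        return {p.findtext("name"): p.findtext("value", default="")
                for p in root.findall("property")}

    # load_properties("nagios-env.xml")["nagios_user"]  -> "nagios"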

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml
deleted file mode 100644
index 137ab71..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,125 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <displayName>Nagios</displayName>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-          <name>NAGIOS_SERVER</name>
-          <displayName>Nagios Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>OOZIE/OOZIE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>HIVE/HCAT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/nagios_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>perl</name>
-            </package>
-            <package>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <name>fping</name>
-            </package>
-            <package>
-              <name>hdp_mon_nagios_addons</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11</osFamily>
-          <packages>
-            <package>
-              <name>php5*-json</name>
-            </package>
-            <package>
-              <name>apache2?mod_php*</name>
-            </package>
-            <package>
-              <name>php-curl</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5</osFamily>
-          <packages>
-            <package>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <configuration-dependencies>
-        <config-type>nagios-env</config-type>
-      </configuration-dependencies>      
-      <monitoringService>true</monitoringService>
-    </service>
-  </services>
-</metainfo>
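
The <osSpecifics> section above is what drove package installation: the packages whose <osFamily> list matches the target family are unioned with those under "any". A hedged sketch of that selection (a standalone helper, not Ambari's actual resolver):

    import xml.etree.ElementTree as ET

    def packages_for(metainfo_path, os_family):
        """Package names applying to os_family, including the 'any' block."""
        root = ET.parse(metainfo_path).getroot()
        names = []
        for spec in root.iter("osSpecific"):
            families = spec.findtext("osFamily", default="").split(",")
            if os_family in families or "any" in families:
                names.extend(p.findtext("name") for p in spec.iter("package"))
        return names

    # For the file above, packages_for(path, "suse11") unions the 'any'
    # packages (perl, the nagios-3.5.0-99 set, fping, ...) with the suse11 PHP set.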

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_aggregate.php
deleted file mode 100644
index db2a20f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_aggregate.php
+++ /dev/null
@@ -1,247 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-define("PASSIVE_MODE_STR", "AMBARIPASSIVE=");
-
-  $options = getopt ("f:s:n:w:c:t:");
-  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $status_file=$options['f'];
-  $status_code=$options['s'];
-  $type=$options['t'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  if ($type == "service" && !array_key_exists('n', $options)) {
-    echo "Service description not provided -n option\n";
-    exit(3);
-  }
-  if ($type == "service") {
-    $service_name=$options['n'];
-    /* echo "DESC: " . $service_name . "\n"; */
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  $counts;
-  if ($type == "service") {
-    $counts=query_alert_count($status_file_content, $service_name, $status_code);
-  } else {
-    $counts=query_host_count($status_file_content, $status_code);
-  }
-
-  if ($counts['total'] == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($counts['actual']/$counts['total'])*100;
-  }
-  if ($percent >= $crit) {
-    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (1);
-  }
-  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-  exit(0);
-
-
-  # Functions
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content, $status_code) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $total_hosts = 0;
-    $hosts = 0;
-    foreach ($matches[0] as $object) {
-      $total_hosts++;
-      if (getParameter($object, "current_state") == $status_code) {
-        $hosts++;
-      }
-    }
-    $hostcounts_object['total'] = $total_hosts;
-    $hostcounts_object['actual'] = $hosts;
-    return $hostcounts_object;
-  }
-
-  /* Query Alert counts */
-  function query_alert_count ($status_file_content, $service_name, $status_code) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $alertcounts_objects = array ();
-    $total_alerts=0;
-    $alerts=0;
-    foreach ($matches[0] as $object) {
-      $long_out = getParameter($object, "long_plugin_output");
-      $skip_if_match=!strncmp($long_out, PASSIVE_MODE_STR, strlen(PASSIVE_MODE_STR));
-
-      if (getParameter($object, "service_description") == $service_name && !$skip_if_match) {
-        $total_alerts++;
-        if (getParameter($object, "current_state") >= $status_code) {
-          $alerts++;
-        }
-      }
-    }
-    $alertcounts_objects['total'] = $total_alerts;
-    $alertcounts_objects['actual'] = $alerts;
-    return $alertcounts_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "NAMENODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-        $pieces[0] = "HBASE";
-        break;
-      case "SYSTEM":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-
-/* JSON document format */
-/*
-{
-  "programstatus":{
-    "last_command_check":"1327385743"
-  },
-  "hostcounts":{
-    "up_nodes":"",
-    "down_nodes":""
-  },
-  "hoststatus":[
-    {
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_state":"0",
-      "last_hard_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_check":"1327385564",
-      "current_attempt":"1",
-      "last_hard_state_change":"1327362079",
-      "last_time_up":"1327385574",
-      "last_time_down":"0",
-      "last_time_unreachable":"0",
-      "is_flapping":"0",
-      "last_check":"1327385574",
-      "servicestatus":[
-      ]
-    }
-  ],
-  "servicestatus":[
-    {
-      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
-      "service_description":"HDFS Current Load",
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_attempt":"1",
-      "current_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_hard_state_change":"1327362079",
-      "last_time_ok":"1327385479",
-      "last_time_warning":"0",
-      "last_time_unknown":"0",
-      "last_time_critical":"0",
-      "last_check":"1327385574",
-      "is_flapping":"0"
-    }
-  ]
-}
-*/
-
-?>
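
For anyone tracing what the new alert framework replaces: the script above parses Nagios' status.dat, counts hoststatus or servicestatus blocks in a given state (skipping AMBARIPASSIVE service entries), and maps the affected percentage onto Nagios exit codes. Its core, condensed into Python (regexes mirror the PHP; parsing and the service-mode >= comparison are simplified):

    import re

    def block_counts(status_text, block, status_code):
        """Count 'block { ... }' sections and those with matching current_state."""
        total = affected = 0
        for body in re.findall(block + r" \{([\S\s]*?)\}", status_text):
            total += 1
            m = re.search(r"\scurrent_state[\s=]*(\S*)\n", body)
            if m and m.group(1) == status_code:
                affected += 1
        return total, affected

    def exit_code(total, affected, warn_pct, crit_pct):
        percent = (affected / total * 100) if total else 0
        if percent >= crit_pct:
            return 2  # CRITICAL
        if percent >= warn_pct:
            return 1  # WARNING
        return 0      # OK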


[10/17] ambari git commit: AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
deleted file mode 100644
index a41e261..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,163 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <displayName>Nagios</displayName>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-           <name>NAGIOS_SERVER</name>
-          <displayName>Nagios Server</displayName>
-           <category>MASTER</category>
-           <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>OOZIE/OOZIE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>HIVE/HCAT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-           <commandScript>
-             <script>scripts/nagios_server.py</script>
-             <scriptType>PYTHON</scriptType>
-             <timeout>600</timeout>
-           </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>perl</name>
-            </package>
-            <package>
-              <name>fping</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>nagios3</name>
-            </package>
-            <package>
-              <name>nagios3-common</name>
-            </package>
-            <package>
-              <name>nagios3-dbg</name>
-            </package>
-            <package>
-              <name>nagios3-doc</name>
-            </package>
-            <package>
-              <name>nagios-plugins-extra</name>
-            </package>
-            <package>
-              <name>php5-curl</name>
-            </package>
-            <package>
-              <name>libapache2-mod-php5</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <name>php</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11</osFamily>
-          <packages>
-            <package>
-              <name>php5*-json</name>
-            </package>
-            <package>
-              <name>apache2?mod_php*</name>
-            </package>
-            <package>
-              <name>php-curl</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5</osFamily>
-          <packages>
-            <package>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <configuration-dependencies>
-        <config-type>nagios-env</config-type>
-      </configuration-dependencies>
-      <monitoringService>true</monitoringService>
-    </service>
-  </services>
-</metainfo>
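
Relative to the 1.3.2 definition earlier, this metainfo adds MAPREDUCE2 and YARN client dependencies; each host-scoped <dependency> with auto-deploy enabled told Ambari to co-install that client wherever NAGIOS_SERVER landed. A toy rendering of that resolution step (structures are simplified stand-ins for Ambari's internals):

    deps = [
        {"name": "HDFS/HDFS_CLIENT", "scope": "host", "auto_deploy": True},
        {"name": "YARN/YARN_CLIENT", "scope": "host", "auto_deploy": True},
    ]

    def co_installed(master_host, deps):
        """Components to add on master_host via host-scoped auto-deploy."""
        return [(master_host, d["name"]) for d in deps
                if d["scope"] == "host" and d["auto_deploy"]]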

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_aggregate.php
deleted file mode 100644
index 792b25b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_aggregate.php
+++ /dev/null
@@ -1,248 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-define("PASSIVE_MODE_STR", "AMBARIPASSIVE=");
-
-  $options = getopt ("f:s:n:w:c:t:");
-  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $status_file=$options['f'];
-  $status_code=$options['s'];
-  $type=$options['t'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  if ($type == "service" && !array_key_exists('n', $options)) {
-    echo "Service description not provided -n option\n";
-    exit(3);
-  }
-  if ($type == "service") {
-    $service_name=$options['n'];
-    /* echo "DESC: " . $service_name . "\n"; */
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  $counts;
-  if ($type == "service") {
-    $counts=query_alert_count($status_file_content, $service_name, $status_code);
-  } else {
-    $counts=query_host_count($status_file_content, $status_code);
-  }
-
-  if ($counts['total'] == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($counts['actual']/$counts['total'])*100;
-  }
-  if ($percent >= $crit) {
-    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (1);
-  }
-  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-  exit(0);
-
-
-  # Functions
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content, $status_code) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $total_hosts = 0;
-    $hosts = 0;
-    foreach ($matches[0] as $object) {
-      $total_hosts++;
-      if (getParameter($object, "current_state") == $status_code) {
-        $hosts++;
-      }
-    }
-    $hostcounts_object['total'] = $total_hosts;
-    $hostcounts_object['actual'] = $hosts;
-    return $hostcounts_object;
-  }
-
-  /* Query Alert counts */
-  function query_alert_count ($status_file_content, $service_name, $status_code) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $alertcounts_objects = array ();
-    $total_alerts=0;
-    $alerts=0;
-    foreach ($matches[0] as $object) {
-      $long_out = getParameter($object, "long_plugin_output");
-      $skip_if_match=!strncmp($long_out, PASSIVE_MODE_STR, strlen(PASSIVE_MODE_STR));
-
-      if (getParameter($object, "service_description") == $service_name && !$skip_if_match) {
-        $total_alerts++;
-        if (getParameter($object, "current_state") >= $status_code) {
-          $alerts++;
-        }
-      }
-    }
-    $alertcounts_objects['total'] = $total_alerts;
-    $alertcounts_objects['actual'] = $alerts;
-    return $alertcounts_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "NAMENODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-        $pieces[0] = "HBASE";
-        break;
-      case "SYSTEM":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-      case "STORM":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-
-/* JSON document format */
-/*
-{
-  "programstatus":{
-    "last_command_check":"1327385743"
-  },
-  "hostcounts":{
-    "up_nodes":"",
-    "down_nodes":""
-  },
-  "hoststatus":[
-    {
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_state":"0",
-      "last_hard_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_check":"1327385564",
-      "current_attempt":"1",
-      "last_hard_state_change":"1327362079",
-      "last_time_up":"1327385574",
-      "last_time_down":"0",
-      "last_time_unreachable":"0",
-      "is_flapping":"0",
-      "last_check":"1327385574",
-      "servicestatus":[
-      ]
-    }
-  ],
-  "servicestatus":[
-    {
-      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
-      "service_description":"HDFS Current Load",
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_attempt":"1",
-      "current_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_hard_state_change":"1327362079",
-      "last_time_ok":"1327385479",
-      "last_time_warning":"0",
-      "last_time_unknown":"0",
-      "last_time_critical":"0",
-      "last_check":"1327385574",
-      "is_flapping":"0"
-    }
-  ]
-}
-*/
-
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_ambari_alerts.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_ambari_alerts.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_ambari_alerts.py
deleted file mode 100644
index 833a798..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_ambari_alerts.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-import os
-import optparse
-import json
-import traceback
-
-def main():
-
-  parser = optparse.OptionParser()
-
-  parser.add_option("-H", "--host", dest="host", default="localhost", help="NameNode host")
-  parser.add_option("-n", "--name", dest="alert_name", help="Alert name to check")
-  parser.add_option("-f", "--file", dest="alert_file", help="File containing the alert structure")
-
-  (options, args) = parser.parse_args()
-
-  if options.alert_name is None:
-    print "Alert name is required (--name or -n)"
-    exit(-1)
-
-  if options.alert_file is None:
-    print "Alert file is required (--file or -f)"
-    exit(-1)
-
-  if not os.path.exists(options.alert_file):
-    print "Status is unreported"
-    exit(3)
-
-  try:
-    with open(options.alert_file, 'r') as f:
-      data = json.load(f)
-
-      buf_list = []
-      exit_code = 0
-
-      for_hosts = data[options.alert_name]
-      if for_hosts.has_key(options.host):
-        for host_entry in for_hosts[options.host]:
-          buf_list.append(host_entry['text'])
-          alert_state = host_entry['state']
-          if alert_state == 'CRITICAL' and exit_code < 2:
-            exit_code = 2
-          elif alert_state == 'WARNING' and exit_code < 1:
-            exit_code = 1
-
-      if 0 == len(buf_list):
-        print "Status is not reported"
-        exit(3)
-      else:
-        print ", ".join(buf_list)
-        exit(exit_code)
-      
-  except Exception:
-    traceback.print_exc()
-    exit(3)
-
-if __name__ == "__main__":
-  main()
-
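
The deleted checker is Python 2 (print statements, dict.has_key). Its decision logic, restated as a Python 3 sketch against the same alert-file layout ({alert_name: {host: [{"text": ..., "state": ...}, ...]}}):

    import json

    SEVERITY = {"CRITICAL": 2, "WARNING": 1}

    def check(alert_file, alert_name, host):
        """Return (nagios_exit_code, message) for one alert on one host."""
        with open(alert_file) as f:
            data = json.load(f)
        entries = data.get(alert_name, {}).get(host, [])
        if not entries:
            return 3, "Status is not reported"
        code = max(SEVERITY.get(e["state"], 0) for e in entries)
        return code, ", ".join(e["text"] for e in entries)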

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_checkpoint_time.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_checkpoint_time.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_checkpoint_time.py
deleted file mode 100644
index 04e8d60..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_checkpoint_time.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-import os
-import optparse
-import time
-import urllib2
-import json
-
-CRIT_MESSAGE = "CRITICAL: Last checkpoint time is below acceptable. Checkpoint was done {h}h. {m}m. ago"
-WARNING_MESSAGE = "WARNING: Last checkpoint time is below acceptable. Checkpoint was done {h}h. {m}m. ago"
-OK_MESSAGE = "OK: Last checkpoint time"
-WARNING_JMX_MESSAGE = "WARNING: NameNode JMX not accessible"
-
-def main():
-  current_time = int(round(time.time() * 1000))
-
-  parser = optparse.OptionParser()
-
-  parser.add_option("-H", "--host", dest="host",
-                    default="localhost", help="NameNode host")
-  parser.add_option("-p", "--port", dest="port",
-                    default="50070", help="NameNode jmx port")
-  parser.add_option("-s", "--ssl-enabled", dest="is_ssl_enabled",
-                    default=False, help="SSL Enabled")  
-  parser.add_option("-w", "--warning", dest="warning",
-                    default="200", help="Percent for warning alert")
-  parser.add_option("-c", "--critical", dest="crit",
-                    default="200", help="Percent for critical alert")
-  parser.add_option("-t", "--period", dest="period",
-                    default="21600", help="Period time")
-  parser.add_option("-x", "--txns", dest="txns",
-                    default="1000000",
-                    help="CheckpointNode will create a checkpoint of the namespace every 'dfs.namenode.checkpoint.txns'")
-  
-  (options, args) = parser.parse_args()
-
-  scheme = "http"
-  if options.is_ssl_enabled == "true":
-    scheme = "https"
-
-  host = get_available_nn_host(options,scheme)
-
-  last_checkpoint_time_qry = "{scheme}://{host}:{port}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem".format(
-      scheme=scheme, host=host, port=options.port)
-
-  print last_checkpoint_time_qry
-    
-  last_checkpoint_time = int(get_value_from_jmx(last_checkpoint_time_qry,"LastCheckpointTime"))
-
-  journal_transaction_info_qry = "{scheme}://{host}:{port}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(
-      scheme=scheme, host=host, port=options.port)
-  
-  journal_transaction_info = get_value_from_jmx(journal_transaction_info_qry,"JournalTransactionInfo")
-  journal_transaction_info_dict = json.loads(journal_transaction_info)
-
-  last_txid = int(journal_transaction_info_dict['LastAppliedOrWrittenTxId'])
-  most_txid = int(journal_transaction_info_dict['MostRecentCheckpointTxId'])
-
-  delta = (current_time - last_checkpoint_time)/1000
-
-  if ((last_txid - most_txid) > int(options.txns)) and (float(delta) / int(options.period)*100 >= int(options.crit)):
-    print CRIT_MESSAGE.format(h=get_time(delta)['h'], m=get_time(delta)['m'])
-    exit(2)
-  elif ((last_txid - most_txid) > int(options.txns)) and (float(delta) / int(options.period)*100 >= int(options.warning)):
-    print WARNING_MESSAGE.format(h=get_time(delta)['h'], m=get_time(delta)['m'])
-    exit(1)
-  else:
-    print OK_MESSAGE
-    exit(0)
-
-
-def get_time(delta):
-  h = int(delta/3600)
-  m = int((delta % 3600)/60)
-  return {'h':h, 'm':m}
-
-
-def get_value_from_jmx(qry, property):
-  try:
-    response = urllib2.urlopen(qry)
-    data=response.read()
-  except Exception:
-    print WARNING_JMX_MESSAGE
-    exit(1)
-
-  data_dict = json.loads(data)
-  return data_dict["beans"][0][property]
-
-
-def get_available_nn_host(options, scheme):
-  nn_hosts = options.host.split(" ")
-  for nn_host in nn_hosts:
-    try:
-      urllib2.urlopen("{scheme}://{host}:{port}/jmx".format(scheme=scheme, host=nn_host, port=options.port))
-      return nn_host
-    except Exception:
-      pass
-  print WARNING_JMX_MESSAGE
-  exit(1)
-
-
-if __name__ == "__main__":
-  main()
\ No newline at end of file
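
The thresholds above combine two conditions: the transaction backlog (LastAppliedOrWrittenTxId minus MostRecentCheckpointTxId) must exceed the -x/--txns threshold, and the time since LastCheckpointTime, as a percentage of the checkpoint period, must reach the warn/crit percentage. Distilled (a sketch; inputs would come from the two JMX beans queried above):

    def checkpoint_state(now_ms, last_checkpoint_ms, last_txid, checkpoint_txid,
                         txns_threshold, period_s, warn_pct, crit_pct):
        """Mirror the deleted script's branching: 2=CRITICAL, 1=WARNING, 0=OK."""
        backlog = last_txid - checkpoint_txid
        elapsed_pct = (now_ms - last_checkpoint_ms) / 1000 / period_s * 100
        if backlog > txns_threshold and elapsed_pct >= crit_pct:
            return 2
        if backlog > txns_threshold and elapsed_pct >= warn_pct:
            return 1
        return 0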

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu.php
deleted file mode 100755
index 0744e38..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:k:r:t:u:e");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=java.lang:type=OperatingSystem",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-
-    $object = $json_array['beans'][0];
-
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }
-
-    $cpu_load = $object['SystemCpuLoad'];
-
-    if (!isset($object['SystemCpuLoad']) || $cpu_load < 0.0) {
-      echo "WARNING: Data unavailable, SystemCpuLoad is not set\n";
-      exit(1);
-    }
-
-    $cpu_count = $object['AvailableProcessors'];
-
-    $cpu_percent = $cpu_load*100;
-  }
-
-  $out_msg = $cpu_count . " CPU, load " . number_format($cpu_percent, 1, '.', '') . '%';
-
-  if ($cpu_percent > $crit) {
-    echo $out_msg . ' > ' . $crit . "% : CRITICAL\n";
-    exit(2);
-  }
-  if ($cpu_percent > $warn) {
-    echo $out_msg . ' > ' . $warn . "% : WARNING\n";
-    exit(1);
-  }
-
-  echo $out_msg . ' < ' . $warn . "% : OK\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab_path -r principal_name -t kinit_path -u security_enabled -e ssl_enabled\n";
-  }
-?>
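
The check reads the java.lang:type=OperatingSystem bean from Hadoop's /jmx servlet and turns SystemCpuLoad into a percentage. The same fetch in Python 3, minus the Kerberos/SPNEGO handling the PHP adds (a hedged sketch; secure clusters would need an authenticating opener):

    import json
    from urllib.request import urlopen

    def cpu_percent(host, port, scheme="http", timeout=10):
        """Return (cpu_count, load_percent) from the OperatingSystem JMX bean."""
        url = f"{scheme}://{host}:{port}/jmx?qry=java.lang:type=OperatingSystem"
        with urlopen(url, timeout=timeout) as resp:
            bean = json.load(resp)["beans"][0]
        load = bean.get("SystemCpuLoad", -1.0)
        if load < 0.0:
            raise ValueError("SystemCpuLoad is not set")
        return bean["AvailableProcessors"], load * 100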

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu.pl b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu.pl
deleted file mode 100644
index a5680f7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu.pl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/perl -w 
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-use strict;
-use Net::SNMP;
-use Getopt::Long;
-
-# Variable
-my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
-my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
-my $o_host = 	undef;
-my $o_community = undef;
-my $o_warn=	undef;
-my $o_crit=	undef;
-my $o_timeout = 15;
-my $o_port = 161;
-
-sub Usage {
-    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
-}
-
-Getopt::Long::Configure ("bundling");
-GetOptions(
-  'H:s'   => \$o_host,	
-  'C:s'   => \$o_community,	
-  'c:s'   => \$o_crit,        
-  'w:s'   => \$o_warn
-          );
-if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
-  Usage();
-  exit 3;
-}
-$o_warn =~ s/\%//g; 
-$o_crit =~ s/\%//g;
-alarm ($o_timeout);
-$SIG{'ALRM'} = sub {
- print "Unable to contact host: $o_host\n";
- exit 3;
-};
-
-# Connect to host
-my ($session,$error);
-($session, $error) = Net::SNMP->session(
-		-hostname  => $o_host,
-		-community => $o_community,
-		-port      => $o_port,
-		-timeout   => $o_timeout
-	  );
-if (!defined($session)) {
-   printf("Error opening session: %s.\n", $error);
-   exit 3;
-}
-
-my $exit_val=undef;
-my $resultat =  (Net::SNMP->VERSION < 4) ?
-	  $session->get_table($base_proc)
-	: $session->get_table(Baseoid => $base_proc);
-
-if (!defined($resultat)) {
-   printf("ERROR: Description table : %s.\n", $session->error);
-   $session->close;
-   exit 3;
-}
-
-$session->close;
-
-my ($cpu_used,$ncpu)=(0,0);
-foreach my $key ( keys %$resultat) {
-  if ($key =~ /$proc_load/) {
-    $cpu_used += $$resultat{$key};
-    $ncpu++;
-  }
-}
-
-if ($ncpu==0) {
-  print "Can't find CPU usage information : UNKNOWN\n";
-  exit 3;
-}
-
-$cpu_used /= $ncpu;
-
-print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
-printf(" %.1f%%",$cpu_used);
-$exit_val=0;
-
-if ($cpu_used > $o_crit) {
- print " > $o_crit% : CRITICAL\n";
- $exit_val=2;
-} else {
-  if ($cpu_used > $o_warn) {
-   print " > $o_warn% : WARNING\n";
-   $exit_val=1;
-  }
-}
-print " < $o_warn% : OK\n" if ($exit_val eq 0);
-exit $exit_val;
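
The Perl variant performs the same check over SNMP: it walks hrProcessorLoad (1.3.6.1.2.1.25.3.3.1.2) and averages one load value per processor. That averaging, separated from the SNMP transport (a sketch; the table would come from an SNMP walk of the host):

    PROC_LOAD_OID = "1.3.6.1.2.1.25.3.3.1.2"

    def average_cpu(snmp_table):
        """snmp_table: {oid_string: value}; average the per-CPU load rows."""
        loads = [int(v) for oid, v in snmp_table.items()
                 if oid.startswith(PROC_LOAD_OID)]
        if not loads:
            raise ValueError("no hrProcessorLoad rows found")
        return sum(loads) / len(loads)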

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu_ha.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu_ha.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu_ha.php
deleted file mode 100644
index 91a7c64..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu_ha.php
+++ /dev/null
@@ -1,116 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:k:r:t:u:e");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  $jmx_response_available = false;
-  $jmx_response;
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=java.lang:type=OperatingSystem",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-
-    $object = $json_array['beans'][0];
-
-    if (count($object) > 0) {
-      $jmx_response_available = true;
-      $jmx_response = $object;
-    }
-  }
-
-  if ($jmx_response_available === false) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }
-
-  $cpu_load = $jmx_response['SystemCpuLoad'];
-
-  if (!isset($jmx_response['SystemCpuLoad']) || $cpu_load < 0.0) {
-    echo "WARNING: Data unavailable, SystemCpuLoad is not set\n";
-    exit(1);
-  }
-
-  $cpu_count = $jmx_response['AvailableProcessors'];
-
-  $cpu_percent = $cpu_load*100;
-
-  $out_msg = $cpu_count . " CPU, load " . number_format($cpu_percent, 1, '.', '') . '%';
-
-  if ($cpu_percent > $crit) {
-    echo $out_msg . ' > ' . $crit . "% : CRITICAL\n";
-    exit(2);
-  }
-  if ($cpu_percent > $warn) {
-    echo $out_msg . ' > ' . $warn . "% : WARNING\n";
-    exit(1);
-  }
-
-  echo $out_msg . ' < ' . $warn . "% : OK\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab_path -r principal_name -t kinit_path -u security_enabled -e ssl_enabled\n";
-  }
-?>
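
This _ha variant differs from check_cpu.php only in iterating a comma-separated host list and keeping whichever bean answered, so a down or standby NameNode does not by itself trip the alert. The pattern, reduced to a first-responder form (a sketch; probe could be something like the cpu_percent helper suggested earlier):

    def first_responding(hosts, probe):
        """Return probe(host) for the first host that answers; None if all fail."""
        for host in hosts.split(","):
            try:
                return probe(host)
            except Exception:
                continue
        return None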

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_datanode_storage.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_datanode_storage.php
deleted file mode 100644
index dee22b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_datanode_storage.php
+++ /dev/null
@@ -1,100 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the datanode, gets the jmx-json document,
- * and checks the storage capacity remaining in local datanode storage.
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($retcode != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }
-  $cap_remain = $object['Remaining']; /* Space still available to HDFS on this datanode */
-  $cap_total = $object['Capacity']; /* Total configured capacity of the data directories */
-  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
-
-  $out_msg = "Capacity:[" . $cap_total . 
-             "], Remaining Capacity:[" . $cap_remain . 
-             "], percent_full:[" . $percent_full  . "]";
-  
-  if ($percent_full > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent_full > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
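
An illustrative invocation of this plugin (host, port, thresholds, and keytab values are examples, not taken from the commit):

  php ./check_datanode_storage.php -h dn01.example.com -p 50075 -w 80% -c 90% \
    -k /etc/security/keytabs/nagios.service.keytab -r nagios@EXAMPLE.COM \
    -t /usr/bin/kinit -s false -e false

With -s false the kinit step is skipped and the keytab arguments are unused; the check then computes (Capacity - Remaining) / Capacity from the unauthenticated JMX response.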

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_blocks.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_blocks.php
deleted file mode 100644
index 3693aa0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_blocks.php
+++ /dev/null
@@ -1,102 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks whether any blocks are corrupt or missing (any missing block
- * is reported as CRITICAL).
- * check_hdfs_blocks -h host1,host2 -p port -s <namenode bean name>
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:s:e:k:r:t:u:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $nn_jmx_property=$options['s'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($retcode != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $m_percent = 0;
-    $object = $json_array['beans'][0];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }
-    $missing_blocks = $object['MissingBlocks'];
-    $total_blocks = $object['BlocksTotal'];
-    if($total_blocks == 0) {
-      $m_percent = 0;
-    } else {
-      $m_percent = ($missing_blocks/$total_blocks)*100;
-      break;
-    }
-  }
-  $out_msg = "missing_blocks:<" . $missing_blocks .
-             ">, total_blocks:<" . $total_blocks . ">";
-
-  if ($m_percent > 0) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -s <namenode bean name> -k keytab path -r principal name -t kinit path -u security enabled -e ssl enabled\n";
-  }
-?>
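
An illustrative invocation; the bean name passed with -s is an assumption here (Ambari supplied it from configuration), and all other values are examples:

  php ./check_hdfs_blocks.php -h nn1.example.com,nn2.example.com -p 50070 \
    -s FSNamesystem -k /etc/security/keytabs/nagios.service.keytab \
    -r nagios@EXAMPLE.COM -t /usr/bin/kinit -u false -e false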

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_capacity.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_capacity.php
deleted file mode 100644
index af72723..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_capacity.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks whether the % of HDFS capacity used exceeds the warning and
- * critical limits.
- * check_hdfs_capacity -h host1,host2 -p port -w <warn%> -c <crit%>
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($retcode != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $percent = 0;
-    $object = $json_array['beans'][0];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }
-    $CapacityUsed = $object['CapacityUsed'];
-    $CapacityRemaining = $object['CapacityRemaining'];
-    $CapacityTotal = $CapacityUsed + $CapacityRemaining;
-    if($CapacityTotal == 0) {
-      $percent = 0;
-    } else {
-      $percent = ($CapacityUsed/$CapacityTotal)*100;
-      break;
-    }
-  }
-  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
-             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
-
-  if ($percent >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
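
An illustrative invocation (hosts, port, thresholds, and keytab values are examples):

  php ./check_hdfs_capacity.php -h nn1.example.com,nn2.example.com -p 50070 \
    -w 80% -c 90% -k /etc/security/keytabs/nagios.service.keytab \
    -r nagios@EXAMPLE.COM -t /usr/bin/kinit -s false -e false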

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hive_metastore_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hive_metastore_status.sh
deleted file mode 100644
index 640c077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hive_metastore_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#The uri is of the form thrift://<hostname>:<port>
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
-export JAVA_HOME=$JAVA_HOME
-out=`hcat $HCAT_URL -e "show databases" 2>&1`
-if [[ "$?" -ne 0 ]]; then
-  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
-  exit 2;
-fi
-echo "OK: Hive Metastore status OK";
-exit 0;
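
The arguments are positional. An illustrative unsecured invocation (host, port, and JAVA_HOME are examples):

  ./check_hive_metastore_status.sh hive01.example.com 9083 /usr/jdk64/jdk1.7.0_67 false

On a secured cluster the keytab path, principal, and kinit path follow as arguments five through seven.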

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hive_thrift_port.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hive_thrift_port.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hive_thrift_port.py
deleted file mode 100644
index c9414f7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hive_thrift_port.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-import os
-import optparse
-import json
-import traceback
-from resource_management import *
-from time import time
-
-
-OK_MESSAGE = "TCP OK - %.3f second response time on port %s"
-CRITICAL_MESSAGE = "Connection to %s on port %s failed"
-
-def main():
-
-  parser = optparse.OptionParser()
-
-  parser.add_option("-H", "--host", dest="address", help="Hive thrift host")
-  parser.add_option("-p", "--port", type="int", dest="port", help="Hive thrift port")
-  parser.add_option("--security-enabled", action="store_true", dest="security_enabled")
-
-  (options, args) = parser.parse_args()
-
-  if options.address is None:
-    print "Specify hive thrift host (--host or -H)"
-    exit(-1)
-
-  if options.port is None:
-    print "Specify hive thrift port (--port or -p)"
-    exit(-1)
-
-  security_enabled = bool(options.security_enabled)
-
-  address = options.address
-  port = options.port
-
-  starttime = time()
-  if check_thrift_port_sasl(address, port, security_enabled=security_enabled):
-    timetaken = time() - starttime
-    print OK_MESSAGE % (timetaken, port)
-    exit(0)
-  else:
-    print CRITICAL_MESSAGE % (address, port)
-    exit(2)
-
-
-if __name__ == "__main__":
-  main()
-
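
This check depends on check_thrift_port_sasl from the resource_management library shipped with the Ambari agent, so it only runs where that package is importable. An illustrative invocation (host and port are examples; drop --security-enabled on unsecured clusters):

  python check_hive_thrift_port.py -H hive01.example.com -p 10000 --security-enabled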

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hue_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hue_status.sh
deleted file mode 100644
index 076d9b3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hue_status.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-status=`/etc/init.d/hue status 2>&1`
-
-if [[ "$?" -ne 0 ]]; then
-	echo "WARNING: Hue is stopped";
-	exit 1;
-fi
-
-echo "OK: Hue is running";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_mapred_local_dir_used.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
deleted file mode 100644
index 3f9243a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-MAPRED_LOCAL_DIRS=$1
-CRITICAL=`echo $2 | cut -d % -f 1`
-IFS=","
-for mapred_dir in $MAPRED_LOCAL_DIRS
-do
-  percent=`df -hl $mapred_dir | awk '{percent=$5;} END{print percent}' | cut -d % -f 1`
-  if [ $percent -ge $CRITICAL ]; then
-    echo "CRITICAL: MapReduce local dir is full."
-    exit 2
-  fi
-done
-echo "OK: MapReduce local dir space is available."
-exit 0
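
The two positional arguments are a comma-separated directory list and the critical usage percentage; directories here are illustrative:

  ./check_mapred_local_dir_used.sh /hadoop/mapred,/grid1/mapred 85%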

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_name_dir_status.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_name_dir_status.php
deleted file mode 100644
index 186166d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_name_dir_status.php
+++ /dev/null
@@ -1,93 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the namenode, gets the jmx-json document,
- * and checks NameDirStatuses for any offline (failed) directories.
- * check_name_dir_status -h hostaddress -p port -k keytab path -r principal name -t kinit path -s security enabled
- */
- 
-  include "hdp_nagios_init.php";
-
-  $options = getopt("h:p:e:k:r:t:s:");
-  //Check only for mandatory options
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-  
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($retcode != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if ($object['NameDirStatuses'] == "") {
-    echo "WARNING: NameNode directory status not available via ".$protocol."://".$host.":".$port."/jmx url, code " . $info['http_code'] ."\n";
-    exit(1);
-  }
-  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
-  $failed_dir_count = count($NameDirStatuses['failed']);
-  $out_msg = "CRITICAL: Offline NameNode directories: ";
-  if ($failed_dir_count > 0) {
-    foreach ($NameDirStatuses['failed'] as $key => $value) {
-      $out_msg = $out_msg . $key . ":" . $value . ", ";
-    }
-    echo $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: All NameNode directories are active" . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled";
-  }
-?>
\ No newline at end of file
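
An illustrative unsecured invocation (host, port, and keytab values are examples):

  php ./check_name_dir_status.php -h nn1.example.com -p 50070 \
    -k /etc/security/keytabs/nagios.service.keytab -r nagios@EXAMPLE.COM \
    -t /usr/bin/kinit -s false -e false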

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_namenodes_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_namenodes_ha.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_namenodes_ha.sh
deleted file mode 100644
index 83c1aca..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_namenodes_ha.sh
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-IFS=',' read -a namenodes <<< "$1"
-port=$2
-totalNN=${#namenodes[@]}
-activeNN=()
-standbyNN=()
-unavailableNN=()
-
-for nn in "${namenodes[@]}"
-do
-  export no_proxy=$nn
-  status=$(curl -m 5 -s http://$nn:$port/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem | grep -i "tag.HAState" | grep -o -E "standby|active")
-  if [ "$status" == "active" ]; then
-    activeNN[${#activeNN[*]}]="$nn"
-  elif [ "$status" == "standby" ]; then
-    standbyNN[${#standbyNN[*]}]="$nn"
-  elif [ "$status" == "" ]; then
-    unavailableNN[${#unavailableNN[*]}]="$nn"
-  fi
-done
-
-message=""
-critical=false
-
-if [ ${#activeNN[@]} -gt 1 ]; then
-  critical=true
-  message=$message" Only one NN can have HAState=active;"
-elif [ ${#activeNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Active NN available;"
-elif [ ${#standbyNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Standby NN available;"
-fi
-
-NNstats=" Active<"
-for nn in "${activeNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Standby<"
-for nn in "${standbyNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Unavailable<"
-for nn in "${unavailableNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">"
-
-if [ $critical == false ]; then
-  echo "OK: NameNode HA healthy;"$NNstats
-  exit 0
-fi
-
-echo "CRITICAL:"$message$NNstats
-exit 2
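
The script takes a comma-separated NameNode list and the common HTTP port, for example:

  ./check_namenodes_ha.sh nn1.example.com,nn2.example.com 50070

It exits 0 only when exactly one NameNode reports active and at least one reports standby.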

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_nodemanager_health.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_nodemanager_health.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_nodemanager_health.sh
deleted file mode 100644
index eedcd62..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_nodemanager_health.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HOST=$1
-PORT=$2
-NODEMANAGER_URL="http://$HOST:$PORT/ws/v1/node/info"
-SEC_ENABLED=$3
-export PATH="/usr/bin:$PATH"
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$4
-  NAGIOS_USER=$5
-  KINIT_PATH=$6
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-
-export no_proxy=$HOST
-RESPONSE=`curl --negotiate -u : -s $NODEMANAGER_URL`
-if [[ "$RESPONSE" == *'"nodeHealthy":true'* ]]; then 
-  echo "OK: NodeManager healthy";
-  exit 0;
-fi
-echo "CRITICAL: NodeManager unhealthy";
-exit 2;
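
An illustrative unsecured invocation (host is an example; 8042 is the usual NodeManager web port):

  ./check_nodemanager_health.sh nm01.example.com 8042 false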

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_oozie_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_oozie_status.sh
deleted file mode 100644
index 820ee99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_oozie_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# OOZIE_URL is of the form http://<hostname>:<port>/oozie
-HOST=`echo $1 | tr '[:upper:]' '[:lower:]'`
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-OOZIE_URL="http://$HOST:$PORT/oozie"
-export JAVA_HOME=$JAVA_HOME
-out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing Oozie Server status [$out]";
-  exit 2;
-fi
-echo "OK: Oozie Server status [$out]";
-exit 0;
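
An illustrative unsecured invocation (host, port, and JAVA_HOME are examples):

  ./check_oozie_status.sh oozie01.example.com 11000 /usr/jdk64/jdk1.7.0_67 false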

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_rpcq_latency.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_rpcq_latency.php
deleted file mode 100644
index 463f69b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_rpcq_latency.php
+++ /dev/null
@@ -1,104 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks the RPC wait time in the queue, RpcQueueTime_avg_time.
- * check_rpcq_latency -h hostaddress -p port -n ServiceName -w 1 -c 1
- * Warning and Critical values are in seconds.
- * Service Name = JobTracker, NameNode, JobHistoryServer
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w'];
-  $crit=$options['c'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($retcode != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  } 
-  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
-  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
-             "> Secs";
-
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
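
An illustrative invocation against a NameNode (thresholds are in seconds; all values are examples):

  php ./check_rpcq_latency.php -h nn1.example.com -p 50070 -n NameNode \
    -w 1 -c 3 -k /etc/security/keytabs/nagios.service.keytab \
    -r nagios@EXAMPLE.COM -t /usr/bin/kinit -s false -e false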

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_rpcq_latency_ha.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_rpcq_latency_ha.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_rpcq_latency_ha.php
deleted file mode 100644
index 3e7616c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_rpcq_latency_ha.php
+++ /dev/null
@@ -1,115 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to each master node in turn, gets the jmx-json
- * document, and checks the RPC wait time in the queue, RpcQueueTime_avg_time.
- * check_rpcq_latency_ha -h host1,host2 -p port -n ServiceName -w 1 -c 1
- * Warning and Critical values are in seconds.
- * Service Name = JobTracker, NameNode, JobHistoryServer
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w'];
-  $crit=$options['c'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-
-  if ($retcode != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  $jmx_response_available = false;
-  $jmx_response = NULL;
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $object = $json_array['beans'][0];
-
-    if (count($object) > 0) {
-      $jmx_response_available = true;
-      $jmx_response = $object;
-    }
-  }
-
-  if ($jmx_response_available === false) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }
-
-  $RpcQueueTime_avg_time = round($jmx_response['RpcQueueTime_avg_time'], 2);
-  $RpcProcessingTime_avg_time = round($jmx_response['RpcProcessingTime_avg_time'], 2);
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
-             "> Secs";
-
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_templeton_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_templeton_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_templeton_status.sh
deleted file mode 100644
index 3e2ba0f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_templeton_status.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# out='{"status":"ok","version":"v1"}<status_code:200>'
-HOST=$1
-PORT=$2
-VERSION=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then 
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-regex="^.*\"status\":\"ok\".*<status_code:200>$"
-export no_proxy=$HOST
-out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
-if [[ $out =~ $regex ]]; then
-  out=`echo "$out" | sed -e 's/{/[/g' | sed -e 's/}/]/g'` 
-  echo "OK: WebHCat Server status [$out]";
-  exit 0;
-fi
-echo "CRITICAL: Error accessing WebHCat Server, status [$out]";
-exit 2;
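
An illustrative unsecured invocation (host is an example; 50111 is the usual WebHCat port and v1 the API version):

  ./check_templeton_status.sh webhcat01.example.com 50111 v1 false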

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_webui.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_webui.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_webui.sh
deleted file mode 100644
index 7044878..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_webui.sh
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-service=$1
-host=$2
-port=$3
-
-checkurl () {
-  url=$1
-  export no_proxy=$host
-  curl $url -k -o /dev/null
-  echo $?
-}
-
-if [[ -z "$service" || -z "$host" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
-  exit 3;
-fi
-
-case "$service" in
-
-jobtracker) 
-    jtweburl="http://$host:$port"
-    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
-      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
-      exit 1;
-    fi
-    ;;
-namenode)
-    nnweburl="http://$host:$port"
-    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
-      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
-      exit 1;
-    fi
-    ;;
-jobhistory)
-    jhweburl="http://$host:$port/jobhistoryhome.jsp"
-    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $jhweburl";
-      exit 1;
-    fi
-    ;;
-hbase)
-    hbaseweburl="http://$host:$port/master-status"
-    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
-      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
-      exit 1;
-    fi
-    ;;
-resourcemanager)
-    rmweburl="http://$host:$port/cluster"
-    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
-      echo "WARNING: ResourceManager Web UI not accessible : $rmweburl";
-      exit 1;
-    fi
-    ;;
-historyserver2)
-    hsweburl="http://$host:$port/jobhistory"
-    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $hsweburl";
-      exit 1;
-    fi
-    ;;
-storm_ui)
-    rmweburl="http://$host:$port"
-    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then
-      echo "WARNING: Storm Web UI not accessible : $rmweburl";
-      exit 1;
-    fi
-    ;;
-falconserver)
-    hsweburl="http://$host:$port/"
-    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then
-      echo "WARNING: FalconServer Web UI not accessible : $hsweburl";
-      exit 1;
-    fi
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode|resourcemanager|historyserver2|falconserver|storm_ui]"
-   exit 3
-   ;;
-esac
-
-echo "OK: Successfully accessed $service Web UI"
-exit 0;
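
An illustrative invocation (the service name must be one of the case labels above; host and port are examples):

  ./check_webui.sh namenode nn1.example.com 50070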

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_webui_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_webui_ha.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_webui_ha.sh
deleted file mode 100644
index d9a814d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_webui_ha.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-service=$1
-hosts=$2
-port=$3
-
-checkurl () {
-  url=$1
-  host=$2
-  export no_proxy=$host
-  curl $url -k -o /dev/null
-  echo $?
-}
-
-if [[ -z "$service" || -z "$hosts" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui_ha.sh service_name, host_name";
-  exit 3;
-fi
-
-case "$service" in
-resourcemanager)
-    url_end_part="/cluster"
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [resourcemanager]"
-   exit 3
-   ;;
-esac
-
-OIFS="$IFS"
-IFS=','
-read -a hosts_array <<< "${hosts}"
-IFS="$OIFS"
-
-for host in "${hosts_array[@]}"
-do
-  weburl="http://${host}:${port}${url_end_part}"
-  if [[ `checkurl "$weburl" "$host"` -eq 0 ]]; then
-    echo "OK: Successfully accessed $service Web UI"
-    exit 0;
-  fi
-done
-
-echo "WARNING: $service Web UI not accessible : $weburl";
-exit 1;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/hdp_mon_nagios_addons.conf
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/hdp_mon_nagios_addons.conf b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/hdp_mon_nagios_addons.conf
deleted file mode 100644
index 87717d2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/hdp_mon_nagios_addons.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-Alias /ambarinagios /usr/share/hdp
-<Directory /usr/share/hdp>
-  Options None
-  AllowOverride None
-  Order allow,deny
-  Allow from all
-</Directory>
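
With this alias in place, files under /usr/share/hdp are served over HTTP. The exact installed location of the alert page and its query string are assumptions here, but a request looked roughly like:

  curl "http://nagios01.example.com/ambarinagios/nagios/nagios_alerts.php?q1=alerts&alert_type=all"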

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/hdp_nagios_init.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/hdp_nagios_init.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/hdp_nagios_init.php
deleted file mode 100644
index 487eb43..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/hdp_nagios_init.php
+++ /dev/null
@@ -1,81 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Common functions called from other alerts
- *
- */
- 
- /*
- * Runs kinit if needed: when security is enabled and klist shows no ticket
- * for this principal, performs a kinit call.
- */
-  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
-    if($security_enabled === 'true') {
-    
-      $is_logined = is_logined($principal_name);
-      
-      if (!$is_logined)
-        $status = kinit($kinit_path_local, $keytab_path, $principal_name);
-      else
-        $status = array(0, '');
-    } else {
-      $status = array(0, '');
-    }
-  
-    return $status;
-  }
-  
-  
-  /*
-  * Checks whether the user already holds a Kerberos ticket for this principal
-  */
-  function is_logined($principal_name) {
-    $check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
-    $check_output =  shell_exec($check_cmd);
-    
-    if ($check_output)
-      return false;
-    else
-      return true;
-  }
-
-  /*
-  * Runs kinit command.
-  */
-  function kinit($kinit_path_local, $keytab_path, $principal_name) {
-    $init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
-    $kinit_output = shell_exec($init_cmd);
-    if ($kinit_output) 
-      $status = array(1, $kinit_output);
-    else
-      $status = array(0, '');
-      
-    return $status;
-  }
-
-  function logout() {
-    if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
-      $status = true;
-    else
-      $status = false;
-      
-    return $status;
-  }
- 
- ?>
\ No newline at end of file
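
For context, kinit_if_needed above is roughly equivalent to this shell sequence; the keytab path, kinit path, and principal are illustrative:

  security_enabled=true
  principal=nagios@EXAMPLE.COM
  if [ "$security_enabled" = "true" ] && ! klist 2>/dev/null | grep -q "$principal"; then
    /usr/bin/kinit -kt /etc/security/keytabs/nagios.service.keytab "$principal"
  fi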


[16/17] ambari git commit: AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hive_metastore_status.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hive_metastore_status.sh
deleted file mode 100644
index 640c077..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hive_metastore_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#The uri is of the form thrift://<hostname>:<port>
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
-export JAVA_HOME=$JAVA_HOME
-out=`hcat $HCAT_URL -e "show databases" 2>&1`
-if [[ "$?" -ne 0 ]]; then
-  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
-  exit 2;
-fi
-echo "OK: Hive Metastore status OK";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hue_status.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hue_status.sh
deleted file mode 100644
index 076d9b3..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_hue_status.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-status=`/etc/init.d/hue status 2>&1`
-
-if [[ "$?" -ne 0 ]]; then
-	echo "WARNING: Hue is stopped";
-	exit 1;
-fi
-
-echo "OK: Hue is running";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_mapred_local_dir_used.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
deleted file mode 100644
index 3f9243a..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-MAPRED_LOCAL_DIRS=$1
-CRITICAL=`echo $2 | cut -d % -f 1`
-IFS=","
-for mapred_dir in $MAPRED_LOCAL_DIRS
-do
-  percent=`df -hl $mapred_dir | awk '{percent=$5;} END{print percent}' | cut -d % -f 1`
-  if [ $percent -ge $CRITICAL ]; then
-    echo "CRITICAL: MapReduce local dir is full."
-    exit 2
-  fi
-done
-echo "OK: MapReduce local dir space is available."
-exit 0
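
The parsing above (df column 5, strip the %) can be exercised on its own; this sketch uses df -P to keep each mount on a single line, and the directory and threshold are illustrative:

  #!/usr/bin/env bash
  # Report whether a directory's filesystem exceeds a use% threshold.
  dir=/tmp        # illustrative directory
  critical=80     # illustrative threshold (percent)
  percent=$(df -Pl "$dir" | awk 'END {print $5}' | cut -d % -f 1)
  if [ "$percent" -ge "$critical" ]; then
    echo "CRITICAL: $dir filesystem is ${percent}% full"
    exit 2
  fi
  echo "OK: $dir filesystem is ${percent}% full"
  exit 0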

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_name_dir_status.php b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_name_dir_status.php
deleted file mode 100644
index 186166d..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_name_dir_status.php
+++ /dev/null
@@ -1,93 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin calls the NameNode, fetches the JMX JSON document, and checks
- * NameDirStatuses for any offline (failed) directories.
- * check_jmx -H hostaddress -p port -k keytab path -r principal name -t kinit path -s security enabled
- */
- 
-  include "hdp_nagios_init.php";
-
-  $options = getopt("h:p:e:k:r:t:s:");
-  //Check only for mandatory options
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-  
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($retcode != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if ($object['NameDirStatuses'] == "") {
-    echo "WARNING: NameNode directory status not available via ".$protocol."://".$host.":".$port."/jmx url, code " . $info['http_code'] ."\n";
-    exit(1);
-  }
-  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
-  $failed_dir_count = count($NameDirStatuses['failed']);
-  $out_msg = "CRITICAL: Offline NameNode directories: ";
-  if ($failed_dir_count > 0) {
-    foreach ($NameDirStatuses['failed'] as $key => $value) {
-      $out_msg = $out_msg . $key . ":" . $value . ", ";
-    }
-    echo $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: All NameNode directories are active" . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled";
-  }
-?>
\ No newline at end of file
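
The same check can be roughly approximated from the shell. NameDirStatuses is itself a JSON string embedded in the outer JMX document, so its quotes arrive escaped; the grep below keys off that. Host and port are placeholders, GNU grep is assumed, and the kinit/SSL handling is omitted:

  #!/usr/bin/env bash
  # Crude shell approximation of the NameNode directory-status check.
  HOST=nn.example.com   # placeholder
  PORT=50070            # placeholder NameNode HTTP port
  json=$(curl -s "http://$HOST:$PORT/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo")
  if [ -z "$json" ]; then
    echo "WARNING: NameNode JMX not reachable"
    exit 1
  fi
  # Match a non-empty failed-directory map, i.e. \"failed\":{...at least one char...}
  if echo "$json" | grep -o '\\"failed\\":{[^}]\{1,\}}' >/dev/null; then
    echo "CRITICAL: Offline NameNode directories detected"
    exit 2
  fi
  echo "OK: All NameNode directories are active"
  exit 0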

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_namenodes_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_namenodes_ha.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_namenodes_ha.sh
deleted file mode 100644
index 83c1aca..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_namenodes_ha.sh
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-IFS=',' read -a namenodes <<< "$1"
-port=$2
-totalNN=${#namenodes[@]}
-activeNN=()
-standbyNN=()
-unavailableNN=()
-
-for nn in "${namenodes[@]}"
-do
-  export no_proxy=$nn
-  status=$(curl -m 5 -s http://$nn:$port/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem | grep -i "tag.HAState" | grep -o -E "standby|active")
-  if [ "$status" == "active" ]; then
-    activeNN[${#activeNN[*]}]="$nn"
-  elif [ "$status" == "standby" ]; then
-    standbyNN[${#standbyNN[*]}]="$nn"
-  elif [ "$status" == "" ]; then
-    unavailableNN[${#unavailableNN[*]}]="$nn"
-  fi
-done
-
-message=""
-critical=false
-
-if [ ${#activeNN[@]} -gt 1 ]; then
-  critical=true
-  message=$message" Only one NN can have HAState=active;"
-elif [ ${#activeNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Active NN available;"
-elif [ ${#standbyNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Standby NN available;"
-fi
-
-NNstats=" Active<"
-for nn in "${activeNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Standby<"
-for nn in "${standbyNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Unavailable<"
-for nn in "${unavailableNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">"
-
-if [ $critical == false ]; then
-  echo "OK: NameNode HA healthy;"$NNstats
-  exit 0
-fi
-
-echo "CRITICAL:"$message$NNstats
-exit 2
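
For reference, a hypothetical invocation (hostnames invented); the output shape follows the NNstats formatting above:

  ./check_namenodes_ha.sh nn1.example.com,nn2.example.com 50070
  # => OK: NameNode HA healthy; Active<nn1.example.com>, Standby<nn2.example.com>, Unavailable<>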

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_nodemanager_health.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_nodemanager_health.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_nodemanager_health.sh
deleted file mode 100644
index eedcd62..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_nodemanager_health.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HOST=$1
-PORT=$2
-NODEMANAGER_URL="http://$HOST:$PORT/ws/v1/node/info"
-SEC_ENABLED=$3
-export PATH="/usr/bin:$PATH"
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$4
-  NAGIOS_USER=$5
-  KINIT_PATH=$6
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-
-export no_proxy=$HOST
-RESPONSE=`curl --negotiate -u : -s $NODEMANAGER_URL`
-if [[ "$RESPONSE" == *'"nodeHealthy":true'* ]]; then 
-  echo "OK: NodeManager healthy";
-  exit 0;
-fi
-echo "CRITICAL: NodeManager unhealthy";
-exit 2;
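
curl --negotiate -u : is what turns the kinit-acquired ticket into SPNEGO authentication on the REST call. A self-contained sketch of the probe (the host is a placeholder, and 8042 is assumed as the usual NodeManager HTTP port):

  #!/usr/bin/env bash
  # Probe the NodeManager REST API and map nodeHealthy to a Nagios state.
  HOST=nm1.example.com   # placeholder
  PORT=8042              # assumed default NodeManager web port
  export no_proxy=$HOST  # talk to the node directly, bypassing any proxy
  RESPONSE=$(curl --negotiate -u : -s "http://$HOST:$PORT/ws/v1/node/info")
  case "$RESPONSE" in
    *'"nodeHealthy":true'*) echo "OK: NodeManager healthy";         exit 0 ;;
    *)                      echo "CRITICAL: NodeManager unhealthy"; exit 2 ;;
  esac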

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_oozie_status.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_oozie_status.sh
deleted file mode 100644
index 820ee99..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_oozie_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# OOZIE_URL is of the form http://<hostname>:<port>/oozie
-HOST=`echo $1 | tr '[:upper:]' '[:lower:]'`
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-OOZIE_URL="http://$HOST:$PORT/oozie"
-export JAVA_HOME=$JAVA_HOME
-out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing Oozie Server status [$out]";
-  exit 2;
-fi
-echo "OK: Oozie Server status [$out]";
-exit 0;
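
The underlying probe is just the Oozie CLI; a hypothetical invocation (the URL is a placeholder):

  oozie admin -oozie http://oozie.example.com:11000/oozie -status
  # Prints e.g. "System mode: NORMAL"; any non-zero exit maps to CRITICAL above.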

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_rpcq_latency.php b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_rpcq_latency.php
deleted file mode 100644
index 463f69b..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_rpcq_latency.php
+++ /dev/null
@@ -1,104 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to master node, get the jmx-json document
- * It checks the rpc wait time in the queue, RpcQueueTime_avg_time
- * check_rpcq_latency -h hostaddress -p port -t ServiceName -w 1 -c 1
- * Warning and Critical values are in seconds
- * Service Name = JobTracker, NameNode, JobHistoryServer
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w'];
-  $crit=$options['c'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($retcode != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  } 
-  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
-  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
-             "> Secs";
-
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
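
The decision logic is a two-threshold compare on RpcQueueTime_avg_time. The same logic in shell needs awk, since bash arithmetic cannot compare floats; all values below are illustrative:

  #!/usr/bin/env bash
  # Map an average RPC queue time (seconds) onto a Nagios state.
  avg=1.37   # illustrative; in the plugin this comes from the JMX bean
  warn=1     # -w threshold (seconds)
  crit=3     # -c threshold (seconds)
  state=$(awk -v a="$avg" -v w="$warn" -v c="$crit" \
    'BEGIN { if (a >= c) print 2; else if (a >= w) print 1; else print 0 }')
  msg="RpcQueueTime_avg_time:<$avg> Secs"
  case $state in
    2) echo "CRITICAL: $msg"; exit 2 ;;
    1) echo "WARNING: $msg";  exit 1 ;;
    *) echo "OK: $msg";       exit 0 ;;
  esac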

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_rpcq_latency_ha.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_rpcq_latency_ha.php b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_rpcq_latency_ha.php
deleted file mode 100644
index 3e7616c..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_rpcq_latency_ha.php
+++ /dev/null
@@ -1,115 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to master node, get the jmx-json document
- * It checks the rpc wait time in the queue, RpcQueueTime_avg_time
- * check_rpcq_latency -h hostaddress -p port -t ServiceName -w 1 -c 1
- * Warning and Critical values are in seconds
- * Service Name = JobTracker, NameNode, JobHistoryServer
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w'];
-  $crit=$options['c'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-
-  if ($retcode != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  $jmx_response_available = false;
-  $jmx_response = NULL;
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $object = $json_array['beans'][0];
-
-    if (count($object) > 0) {
-      $jmx_response_available = true;
-      $jmx_response = $object;
-    }
-  }
-
-  if ($jmx_response_available === false) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }
-
-  $RpcQueueTime_avg_time = round($jmx_response['RpcQueueTime_avg_time'], 2);
-  $RpcProcessingTime_avg_time = round($jmx_response['RpcProcessingTime_avg_time'], 2);
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
-             "> Secs";
-
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_templeton_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_templeton_status.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_templeton_status.sh
deleted file mode 100644
index 3e2ba0f..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_templeton_status.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# out='{"status":"ok","version":"v1"}<status_code:200>'
-HOST=$1
-PORT=$2
-VERSION=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then 
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-regex="^.*\"status\":\"ok\".*<status_code:200>$"
-export no_proxy=$HOST
-out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
-if [[ $out =~ $regex ]]; then
-  out=`echo "$out" | sed -e 's/{/[/g' | sed -e 's/}/]/g'` 
-  echo "OK: WebHCat Server status [$out]";
-  exit 0;
-fi
-echo "CRITICAL: Error accessing WebHCat Server, status [$out]";
-exit 2;
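
curl's -w '<status_code:%{http_code}>' appends the HTTP status to the response body, letting a single regex validate both the JSON payload and the status code in one pass. A standalone sketch (the host is a placeholder, and 50111 is assumed as the usual WebHCat port):

  #!/usr/bin/env bash
  # Check WebHCat /status, requiring both "status":"ok" and HTTP 200.
  HOST=webhcat.example.com  # placeholder
  PORT=50111                # assumed default WebHCat port
  regex='^.*"status":"ok".*<status_code:200>$'
  out=$(curl -s -w '<status_code:%{http_code}>' "http://$HOST:$PORT/templeton/v1/status" 2>&1)
  if [[ $out =~ $regex ]]; then
    echo "OK: WebHCat Server status [$out]"
    exit 0
  fi
  echo "CRITICAL: Error accessing WebHCat Server, status [$out]"
  exit 2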

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_webui.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_webui.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_webui.sh
deleted file mode 100644
index 7044878..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_webui.sh
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-service=$1
-host=$2
-port=$3
-
-checkurl () {
-  url=$1
-  export no_proxy=$host
-  curl $url -k -o /dev/null
-  echo $?
-}
-
-if [[ -z "$service" || -z "$host" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name port";
-  exit 3;
-fi
-
-case "$service" in
-
-jobtracker) 
-    jtweburl="http://$host:$port"
-    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
-      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
-      exit 1;
-    fi
-    ;;
-namenode)
-    nnweburl="http://$host:$port"
-    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
-      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
-      exit 1;
-    fi
-    ;;
-jobhistory)
-    jhweburl="http://$host:$port/jobhistoryhome.jsp"
-    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $jhweburl";
-      exit 1;
-    fi
-    ;;
-hbase)
-    hbaseweburl="http://$host:$port/master-status"
-    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
-      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
-      exit 1;
-    fi
-    ;;
-resourcemanager)
-    rmweburl="http://$host:$port/cluster"
-    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
-      echo "WARNING: ResourceManager Web UI not accessible : $rmweburl";
-      exit 1;
-    fi
-    ;;
-historyserver2)
-    hsweburl="http://$host:$port/jobhistory"
-    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $hsweburl";
-      exit 1;
-    fi
-    ;;
-storm_ui)
-    rmweburl="http://$host:$port"
-    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then
-      echo "WARNING: Storm Web UI not accessible : $rmweburl";
-      exit 1;
-    fi
-    ;;
-falconserver)
-    hsweburl="http://$host:$port/"
-    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then
-      echo "WARNING: FalconServer Web UI not accessible : $hsweburl";
-      exit 1;
-    fi
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode|resourcemanager|historyserver2|falconserver|storm_ui]"
-   exit 3
-   ;;
-esac
-
-echo "OK: Successfully accessed $service Web UI"
-exit 0;
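
Hypothetical invocations (hosts and ports invented):

  ./check_webui.sh namenode nn.example.com 50070
  ./check_webui.sh resourcemanager rm.example.com 8088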

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_webui_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_webui_ha.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_webui_ha.sh
deleted file mode 100644
index d9a814d..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_webui_ha.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-service=$1
-hosts=$2
-port=$3
-
-checkurl () {
-  url=$1
-  host=$2
-  export no_proxy=$host
-  curl $url -k -o /dev/null
-  echo $?
-}
-
-if [[ -z "$service" || -z "$hosts" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui_ha.sh service_name host_names port";
-  exit 3;
-fi
-
-case "$service" in
-resourcemanager)
-    url_end_part="/cluster"
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [resourcemanager]"
-   exit 3
-   ;;
-esac
-
-OIFS="$IFS"
-IFS=','
-read -a hosts_array <<< "${hosts}"
-IFS="$OIFS"
-
-for host in "${hosts_array[@]}"
-do
-  weburl="http://${host}:${port}${url_end_part}"
-  if [[ `checkurl "$weburl" "$host"` -eq 0 ]]; then
-    echo "OK: Successfully accessed $service Web UI"
-    exit 0;
-  fi
-done
-
-echo "WARNING: $service Web UI not accessible : $weburl";
-exit 1;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_wrapper.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_wrapper.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_wrapper.sh
deleted file mode 100644
index d350e4f..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/check_wrapper.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-function real_service() {
-  desc=$NAGIOS_SERVICEGROUPNAME
-  eval "$1='$desc'"
-}
-
-function real_component() {
-  arrDesc=(${NAGIOS_SERVICEDESC//::/ })
-
-  compName="${arrDesc[0]}"
-
-  case "$compName" in
-    HBASEMASTER)
-      realCompName="HBASE_MASTER"
-    ;;
-    REGIONSERVER)
-      realCompName="HBASE_REGIONSERVER"
-    ;;
-    JOBHISTORY)
-      realCompName="MAPREDUCE2"
-    ;;
-    HIVE-METASTORE)
-      realCompName="HIVE_METASTORE"
-    ;;
-    HIVE-SERVER)
-      realCompName="HIVE_SERVER"
-    ;;
-    FLUME)
-      realCompName="FLUME_HANDLER"
-    ;;
-    HUE)
-      realCompName="HUE_SERVER"
-    ;;
-    WEBHCAT)
-      realCompName="WEBHCAT_SERVER"
-    ;;
-    *)
-      realCompName=$compName
-    ;;
-  esac
-
-  eval "$1='$realCompName'"
-}
-
-real_service_var=""
-real_service real_service_var
-
-real_comp_var=""
-real_component real_comp_var
-
-
-wrapper_output=`exec "$@"`
-wrapper_result=$?
-
-if [ "$wrapper_result" == "0" ]; then
-  echo "$wrapper_output"
-  exit $wrapper_result
-fi
-
-if [ ! -f /var/nagios/ignore.dat ]; then
-  echo "$wrapper_output"
-  exit $wrapper_result
-else
-  count=$(grep $NAGIOS_HOSTNAME /var/nagios/ignore.dat | grep $real_service_var | grep $real_comp_var | wc -l)
-  if [ "$count" -ne "0" ]; then
-    echo -e "$wrapper_output\nAMBARIPASSIVE=${wrapper_result}" | sed 's/^[ \t]*//g'
-    exit 0
-  else
-    echo "$wrapper_output"
-    exit $wrapper_result
-  fi
-fi
-
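
The wrapper idea in isolation: run the real plugin, and when it fails, downgrade to OK with an AMBARIPASSIVE marker if the host/service pair appears in ignore.dat. A sketch following the greps above (the NAGIOS_* variables come from the Nagios environment):

  #!/usr/bin/env bash
  # "$@" is the real plugin command line, e.g. ./wrap.sh /path/check_foo.sh args
  out=$("$@")
  rc=$?
  if [ $rc -eq 0 ] || [ ! -f /var/nagios/ignore.dat ]; then
    echo "$out"
    exit $rc
  fi
  # Suppress the failure when this host/service is marked as ignored.
  if grep "$NAGIOS_HOSTNAME" /var/nagios/ignore.dat | grep -q "$NAGIOS_SERVICEGROUPNAME"; then
    echo -e "$out\nAMBARIPASSIVE=$rc"   # -e so the \n becomes a real newline
    exit 0
  fi
  echo "$out"
  exit $rc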

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/hdp_mon_nagios_addons.conf
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/hdp_mon_nagios_addons.conf b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/hdp_mon_nagios_addons.conf
deleted file mode 100644
index 87717d2..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/hdp_mon_nagios_addons.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-Alias /ambarinagios /usr/share/hdp
-<Directory /usr/share/hdp>
-  Options None
-  AllowOverride None
-  Order allow,deny
-  Allow from all
-</Directory>
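
Note that Order allow,deny / Allow from all is Apache httpd 2.2 access-control syntax (httpd 2.4 would use Require all granted). Once loaded, the alias can be smoke-tested with curl; the hostname and the exact page path under /usr/share/hdp are assumptions:

  curl -s -o /dev/null -w '%{http_code}\n' \
    http://nagios.example.com/ambarinagios/nagios/nagios_alerts.php   # expect 200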

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/hdp_nagios_init.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/hdp_nagios_init.php b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/hdp_nagios_init.php
deleted file mode 100644
index 487eb43..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/hdp_nagios_init.php
+++ /dev/null
@@ -1,81 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Common functions called from other alerts
- *
- */
- 
- /*
- * Performs kinit when needed: if security is enabled and klist shows no
- * ticket for this principal, makes the kinit call; otherwise succeeds immediately.
- */
-  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
-    if($security_enabled === 'true') {
-    
-      $is_logined = is_logined($principal_name);
-      
-      if (!$is_logined)
-        $status = kinit($kinit_path_local, $keytab_path, $principal_name);
-      else
-        $status = array(0, '');
-    } else {
-      $status = array(0, '');
-    }
-  
-    return $status;
-  }
-  
-  
-  /*
-  * Checks whether a Kerberos ticket for the principal already exists (via klist).
-  */
-  function is_logined($principal_name) {
-    $check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
-    $check_output =  shell_exec($check_cmd);
-    
-    if ($check_output)
-      return false;
-    else
-      return true;
-  }
-
-  /*
-  * Runs the kinit command.
-  */
-  function kinit($kinit_path_local, $keytab_path, $principal_name) {
-    $init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
-    $kinit_output = shell_exec($init_cmd);
-    if ($kinit_output) 
-      $status = array(1, $kinit_output);
-    else
-      $status = array(0, '');
-      
-    return $status;
-  }
-
-  function logout() {
-    if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
-      $status = true;
-    else
-      $status = false;
-      
-    return $status;
-  }
- 
- ?>
\ No newline at end of file
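
The kinit_if_needed flow translates directly to shell: kinit only when security is enabled and klist shows no ticket for the principal. A sketch with placeholder paths and principal:

  #!/usr/bin/env bash
  # kinit only when needed: skip when a ticket for the principal already exists.
  SECURITY_ENABLED=true                                # illustrative flag
  KINIT=/usr/bin/kinit                                 # placeholder path
  KEYTAB=/etc/security/keytabs/nagios.service.keytab   # placeholder keytab
  PRINCIPAL=nagios/host.example.com@EXAMPLE.COM        # placeholder principal
  if [ "$SECURITY_ENABLED" = "true" ] && ! klist 2>/dev/null | grep -q "$PRINCIPAL"; then
    out=$("$KINIT" -kt "$KEYTAB" "$PRINCIPAL" 2>&1) \
      || { echo "CRITICAL: kinit failed [$out]"; exit 2; }
  fi
  echo "OK: Kerberos ticket present"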

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/nagios_alerts.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/nagios_alerts.php b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/nagios_alerts.php
deleted file mode 100644
index d15b023..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/nagios_alerts.php
+++ /dev/null
@@ -1,513 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Constants. */
-define("HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES", "Properties");
-define("HDP_MON_RESPONSE_OPTION_KEY__TYPE", "Type");
-
-define("HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE", "Uncacheable");
-define("HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON", "JSON");
-define("HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT", "JAVASCRIPT");
-
-define("HDP_MON_QUERY_ARG__JSONP", "jsonp");
-
-/** Spits out appropriate response headers, as per the options passed in. */
-function hdp_mon_generate_response_headers( $response_options )
-{
-  if( $response_options[HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES] == HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE )
-  {
-    // Make the response uncache-able.
-    header("Expires: Mon, 26 Jul 1997 05:00:00 GMT"); // Date in the past
-    header("Last-Modified: " . gmdate("D, d M Y H:i:s") . " GMT"); // Always modified
-    header("Cache-Control: no-cache, must-revalidate"); // HTTP/1.1
-    header("Pragma: no-cache"); // HTTP/1.0
-  }
-
-  switch( $response_options[HDP_MON_RESPONSE_OPTION_KEY__TYPE] )
-  {
-    case HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON:
-      {
-        header('Content-type: application/json');
-      }
-      break;
-
-    case HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT:
-      {
-        header('Content-type: application/javascript');
-      }
-      break;
-  }
-}
-
-/** Given $response_data (which we expect to be a JSON string), generate an
- *  HTTP response, which includes emitting the necessary HTTP response headers
- *  followed by the response body (that is either plain ol' $response_data,
- *  or a JSONP wrapper around it).
- */
-function hdp_mon_generate_response( $response_data )
-{
-  $jsonpFunctionName = NULL;
-  if (isset($_GET[HDP_MON_QUERY_ARG__JSONP])) {
-    $jsonpFunctionName = $_GET[HDP_MON_QUERY_ARG__JSONP];
-  }
-
-  hdp_mon_generate_response_headers( array
-  ( HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES => HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE,
-  HDP_MON_RESPONSE_OPTION_KEY__TYPE =>
-  isset( $jsonpFunctionName )  && $jsonpFunctionName != "" ?
-  HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT :
-  HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON ) );
-
-  if( isset( $jsonpFunctionName ) )
-  {
-    echo "$jsonpFunctionName( $response_data );";
-  }
-  else
-  {
-    echo $response_data;
-  }
-}
-
-  /* alert_type { ok, non-ok, warning, critical, all } */
-  define ("all", "-2");
-  define ("nok", "-1");
-  define ("ok", "0");
-  define ("warn", "1");
-  define ("critical", "2");
-
-  define ("HDFS_SERVICE_CHECK", "NAMENODE::NameNode process down");
-  define ("MAPREDUCE_SERVICE_CHECK", "JOBTRACKER::JobTracker process down");
-  define ("HBASE_SERVICE_CHECK", "HBASEMASTER::HBaseMaster process down");
-  define ("ZOOKEEPER_SERVICE_CHECK", "ZOOKEEPER::Percent ZooKeeper Servers down");
-  define ("HIVE_SERVICE_CHECK", "HIVE-METASTORE::Hive Metastore status check");
-  define ("OOZIE_SERVICE_CHECK", "OOZIE::Oozie Server status check");
-  define ("WEBHCAT_SERVICE_CHECK", "WEBHCAT::WebHCat Server status check");
-  define ("PUPPET_SERVICE_CHECK", "PUPPET::Puppet agent down");
-
-  // on SUSE, some versions of Nagios stored data in /var/lib
-  $status_file = "/var/nagios/status.dat";
-  if (!file_exists($status_file) && file_exists("/etc/SuSE-release")) {
-    $status_file = "/var/lib/nagios/status.dat";
-  }
-  
-  $q1="";
-  if (array_key_exists('q1', $_GET)) {
-    $q1=$_GET["q1"];
-  }
-  $q2="";
-  if (array_key_exists('q2', $_GET)) {
-    $q2=$_GET["q2"];
-  }
-  $alert_type="";
-  if (array_key_exists('alert_type', $_GET)) {
-    $alert_type=$_GET["alert_type"];
-  }
-  $host="";
-  if (array_key_exists('host_name', $_GET)) {
-    $host=$_GET["host_name"];
-  }
-  $indent="";
-  if (array_key_exists('indent', $_GET)) {
-    $indent=$_GET["indent"];
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  if ($q1 == "alerts") {
-    /* Add the service status object to result array */
-    $result['alerts'] = query_alerts ($status_file_content, $alert_type, $host);
-  }
-
-  if ($q2 == "hosts") {
-    /* Add the service status object to result array */
-    $result['hosts'] = query_hosts ($status_file_content, $alert_type, $host);
-  }
-
-  /* Add host count object to the results */
-  $result['hostcounts'] = query_host_count ($status_file_content);
-
-  /* Add services runtime states */
-  $result['servicestates'] = query_service_states ($status_file_content);
-
-  /* Return results */
-  if ($indent == "true") {
-    hdp_mon_generate_response(indent(json_encode($result)));
-  } else {
-    hdp_mon_generate_response(json_encode($result));
-  }
-
-  # Functions
-  /* Query service states */
-  function query_service_states ($status_file_content) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $services_object = array ();
-    $services_object["PUPPET"] = 0;
-    foreach ($matches[0] as $object) {
-
-      if (getParameter($object, "service_description") == HDFS_SERVICE_CHECK) {
-        $services_object["HDFS"] = getParameter($object, "last_hard_state");
-        if ($services_object["HDFS"] >= 1) {
-          $services_object["HDFS"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == MAPREDUCE_SERVICE_CHECK) {
-        $services_object["MAPREDUCE"] = getParameter($object, "last_hard_state");
-        if ($services_object["MAPREDUCE"] >= 1) {
-          $services_object["MAPREDUCE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == HBASE_SERVICE_CHECK) {
-        $services_object["HBASE"] = getParameter($object, "last_hard_state");
-        if ($services_object["HBASE"] >= 1) {
-          $services_object["HBASE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == HIVE_SERVICE_CHECK) {
-        $services_object["HIVE"] = getParameter($object, "last_hard_state");
-        if ($services_object["HIVE"] >= 1) {
-          $services_object["HIVE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == OOZIE_SERVICE_CHECK) {
-        $services_object["OOZIE"] = getParameter($object, "last_hard_state");
-        if ($services_object["OOZIE"] >= 1) {
-          $services_object["OOZIE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == WEBHCAT_SERVICE_CHECK) {
-        $services_object["WEBHCAT"] = getParameter($object, "last_hard_state");
-        if ($services_object["WEBHCAT"] >= 1) {
-          $services_object["WEBHCAT"] = 1;
-        }
-        continue;
-      }
-      /* ZooKeeper is treated as running if the alert is OK or WARNING (i.e.,
-       * only some of the ZooKeeper instances are down).
-       */
-      if (getParameter($object, "service_description") == ZOOKEEPER_SERVICE_CHECK) {
-        $services_object["ZOOKEEPER"] = getParameter($object, "last_hard_state");
-        if ($services_object["ZOOKEEPER"] <= 1) {
-          $services_object["ZOOKEEPER"] = 0;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == PUPPET_SERVICE_CHECK) {
-        $state = getParameter($object, "last_hard_state");
-        if ($state >= 1) {
-          $services_object["PUPPET"]++;
-        }
-        continue;
-      }
-    }
-    if ($services_object["PUPPET"] >= 1) {
-      $services_object["PUPPET"] = 1;
-    }
-    $services_object = array_map('strval', $services_object);
-    return $services_object;
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $up_hosts = 0;
-    $down_hosts = 0;
-
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "last_hard_state") != ok) {
-        $down_hosts++;
-      } else {
-        $up_hosts++;
-      }
-    }
-    $hostcounts_object['up_hosts'] = $up_hosts;
-    $hostcounts_object['down_hosts'] = $down_hosts;
-    $hostcounts_object = array_map('strval', $hostcounts_object);
-    return $hostcounts_object;
-  }
-
-  /* Query Hosts */
-  function query_hosts ($status_file_content, $alert_type, $host) {
-    $hoststatus_attributes = array ("host_name", "current_state", "last_hard_state",
-                              "plugin_output", "last_check", "current_attempt",
-                              "last_hard_state_change", "last_time_up", "last_time_down",
-                              "last_time_unreachable", "is_flapping", "last_check");
-
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hosts_objects = array ();
-    $i = 0;
-    foreach ($matches[0] as $object) {
-      $hoststatus = array ();
-      $chost = getParameter($object, "host_name");
-      if (empty($host) || $chost == $host) {
-        foreach ($hoststatus_attributes as $attrib) {
-          $hoststatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
-        }
-        $hoststatus['alerts'] = query_alerts ($status_file_content, $alert_type, $chost);
-        if (!empty($host)) {
-          $hosts_objects[$i] = $hoststatus;
-          $i++;
-          break;
-        }
-      }
-      if (!empty($hoststatus)) {
-        $hosts_objects[$i] = $hoststatus;
-        $i++;
-      }
-    }
-    /* echo "COUNT : " . count ($services_objects) . "\n"; */
-    return $hosts_objects;
-  }
-
-  /* Query Alerts */
-  function query_alerts ($status_file_content, $alert_type, $host) {
-
-    $servicestatus_attributes = array ("service_description", "host_name", "current_attempt",
-                                       "current_state", "plugin_output", "last_hard_state_change", "last_hard_state",
-                                       "last_time_ok", "last_time_warning", "last_time_unknown",
-                                       "last_time_critical", "is_flapping", "last_check",
-                                       "long_plugin_output");
-
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    #echo $matches[0][0] . ", " . $matches[0][1] . "\n";
-    #echo $matches[1][0] . ", " . $matches[1][1] . "\n";
-    $services_objects = array ();
-    $i = 0;
-    foreach ($matches[1] as $object) {      
-      $servicestatus = getParameterMap($object, $servicestatus_attributes);
-      switch ($alert_type) {
-      case "all":
-        if (empty($host) || $servicestatus['host_name'] == $host) {
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "nok":
-        if (getParameterMapValue($servicestatus, "last_hard_state") != ok &&
-           (empty($host) || getParameterMapValue($servicestatus, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = getParameterMapValue($servicestatus, $attrib);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "ok":
-        if (getParameterMapValue($servicestatus, "last_hard_state") == ok &&
-           (empty($host) || getParameterMapValue($servicestatus, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = getParameterMapValue($servicestatus, $attrib);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "warn":
-        if (getParameterMapValue($servicestatus, "last_hard_state") == warn &&
-           (empty($host) || getParameterMapValue($servicestatus, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = getParameterMapValue($servicestatus, $attrib);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "critical":
-        if (getParameterMapValue($servicestatus, "last_hard_state") == critical &&
-           (empty($host) || getParameterMapValue($servicestatus, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = getParameterMapValue($servicestatus, $attrib);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      }
-      
-      if (!empty($servicestatus)) {
-        $services_objects[$i] = $servicestatus;
-        $i++;
-      }
-    }
-
-    // echo "COUNT : " . count ($services_objects) . "\n";
-    return $services_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "DATANODE":
-      case "NAMENODE":
-      case "JOURNALNODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-      case "TASKTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-      case "REGIONSERVER":
-        $pieces[0] = "HBASE";
-        break;
-      case "HIVE-METASTORE":
-      case "HIVE-SERVER":
-        $pieces[0] = "HIVE";
-        break;
-      case "ZKSERVERS":
-        $pieces[0] = "ZOOKEEPER";
-        break;
-      case "AMBARI":
-        $pieces[0] = "AMBARI";
-        break;
-      case "FLUME":
-        $pieces[0] = "FLUME";
-        break;
-      case "JOBHISTORY":
-        $pieces[0] = "MAPREDUCE2";
-        break;
-      case "RESOURCEMANAGER":
-      case "APP_TIMELINE_SERVER":
-      case "NODEMANAGER":
-        $pieces[0] = "YARN";
-        break;
-      case "STORM_UI_SERVER":
-      case "NIMBUS":
-      case "DRPC_SERVER":
-      case "SUPERVISOR":
-      case "STORM_REST_API":
-        $pieces[0] = "STORM";
-        break;
-      case "NAGIOS":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-      case "ZOOKEEPER":
-      case "OOZIE":
-      case "WEBHCAT":
-      case "GANGLIA":
-      case "STORM":
-      case "FALCON":
-      case "PUPPET":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-  function getParameterMapValue($map, $key) {
-    $value = $map[$key];
-
-    if (!is_null($value))
-      return "" . $value;
-
-    return "";
-  }
-
-
-  function getParameterMap($object, $keynames) {
-
-    $cnt = preg_match_all('/\t([\S]*)=[\n]?[\t]?([\S= ]*)/', $object, $matches, PREG_PATTERN_ORDER);
-
-    $tmpmap = array_combine($matches[1], $matches[2]);
-
-    $map = array();
-    foreach ($keynames as $key) {
-      $map[$key] = htmlentities($tmpmap[$key], ENT_COMPAT);
-    }
-
-    return $map;
-  }
-  
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-?>
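
For reference, the "PREFIX::description" convention that get_service_type()
decodes can be summarized in a few lines. A minimal Python sketch (not part
of this commit; the table only repeats mappings visible in the PHP switch
above):

COMPONENT_TO_SERVICE = {
    'DATANODE': 'HDFS', 'NAMENODE': 'HDFS', 'JOURNALNODE': 'HDFS',
    'JOBTRACKER': 'MAPREDUCE', 'TASKTRACKER': 'MAPREDUCE',
    'HBASEMASTER': 'HBASE', 'REGIONSERVER': 'HBASE',
    'HIVE-METASTORE': 'HIVE', 'HIVE-SERVER': 'HIVE',
    'ZKSERVERS': 'ZOOKEEPER', 'JOBHISTORY': 'MAPREDUCE2',
    'RESOURCEMANAGER': 'YARN', 'APP_TIMELINE_SERVER': 'YARN',
    'NODEMANAGER': 'YARN', 'NIMBUS': 'STORM', 'SUPERVISOR': 'STORM',
}
PASS_THROUGH = set(['NAGIOS', 'HDFS', 'MAPREDUCE', 'HBASE', 'ZOOKEEPER',
                    'OOZIE', 'WEBHCAT', 'GANGLIA', 'STORM', 'FALCON',
                    'PUPPET', 'AMBARI', 'FLUME'])

def get_service_type(service_description):
    # Everything before the first "::" names the component.
    prefix = service_description.split('::', 1)[0]
    if prefix in COMPONENT_TO_SERVICE:
        return COMPONENT_TO_SERVICE[prefix]
    return prefix if prefix in PASS_THROUGH else 'UNKNOWN'

# get_service_type('NAMENODE::NameNode process') -> 'HDFS'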

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/sys_logger.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/sys_logger.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/sys_logger.py
deleted file mode 100644
index 8f0a415..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/files/sys_logger.py
+++ /dev/null
@@ -1,186 +0,0 @@
-#!/usr/bin/python
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import sys
-import syslog
-
-# dictionary of state->severity mappings
-severities = {'UP':'OK', 'DOWN':'Critical', 'UNREACHABLE':'Critical', 'OK':'OK',
-              'WARNING':'Warning', 'UNKNOWN':'Warning', 'CRITICAL':'Critical'}
-
-# List of services which can result in events at the Degraded severity
-degraded_alert_services = ['HBASEMASTER::HBaseMaster CPU utilization',
-                           'HDFS::Namenode RPC Latency',
-                           'MAPREDUCE::JobTracker RPC Latency',
-                           'JOBTRACKER::Jobtracker CPU utilization']
-
-# List of services which can result in events at the Fatal severity
-fatal_alert_services = ['NAMENODE::Namenode Process down',
-                        'NAMENODE::NameNode process']
-
-# dictionary of service->msg_id mappings
-msg_ids = {'Host::Ping':'host_down',
-           'HBASEMASTER::HBaseMaster CPU utilization':'master_cpu_utilization',
-           'HDFS::HDFS Capacity utilization':'hdfs_percent_capacity',
-           'HDFS::Corrupt/Missing blocks':'hdfs_block',
-           'NAMENODE::Namenode Edit logs directory status':'namenode_edit_log_write',
-           'HDFS::Percent DataNodes down':'datanode_down',
-           'DATANODE::Process down':'datanode_process_down',
-           'HDFS::Percent DataNodes storage full':'datanodes_percent_storage_full',
-           'NAMENODE::Namenode Process down':'namenode_process_down',
-           'HDFS::Namenode RPC Latency':'namenode_rpc_latency',
-           'DATANODE::Storage full':'datanodes_storage_full',
-           'JOBTRACKER::Jobtracker Process down':'jobtracker_process_down',
-           'MAPREDUCE::JobTracker RPC Latency':'jobtracker_rpc_latency',
-           'MAPREDUCE::Percent TaskTrackers down':'tasktrackers_down',
-           'TASKTRACKER::Process down':'tasktracker_process_down',
-           'HBASEMASTER::HBaseMaster Process down':'hbasemaster_process_down',
-           'REGIONSERVER::Process down':'regionserver_process_down',
-           'HBASE::Percent region servers down':'regionservers_down',
-           'HIVE-METASTORE::HIVE-METASTORE status check':'hive_metastore_process_down',
-           'ZOOKEEPER::Percent zookeeper servers down':'zookeepers_down',
-           'ZKSERVERS::ZKSERVERS Process down':'zookeeper_process_down',
-           'OOZIE::Oozie status check':'oozie_down',
-           'TEMPLETON::Templeton status check':'templeton_down',
-           'PUPPET::Puppet agent down':'puppet_down',
-           'NAGIOS::Nagios status log staleness':'nagios_status_log_stale',
-           'GANGLIA::Ganglia [gmetad] Process down':'ganglia_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for jobtracker':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for namenode':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for slaves':'ganglia_collector_process_down',
-           'NAMENODE::Secondary Namenode Process down':'secondary_namenode_process_down',
-           'JOBTRACKER::Jobtracker CPU utilization':'jobtracker_cpu_utilization',
-           'HBASEMASTER::HBase Web UI down':'hbase_ui_down',
-           'NAMENODE::Namenode Web UI down':'namenode_ui_down',
-           'JOBTRACKER::JobHistory Web UI down':'jobhistory_ui_down',
-           'JOBTRACKER::JobTracker Web UI down':'jobtracker_ui_down',
-
-           'HBASEMASTER::HBase Master CPU utilization':'master_cpu_utilization',
-           'HDFS::HDFS capacity utilization':'hdfs_percent_capacity',
-           'NAMENODE::NameNode edit logs directory status':'namenode_edit_log_write',
-           'DATANODE::DataNode process down':'datanode_process_down',
-           'NAMENODE::NameNode process down':'namenode_process_down',
-           'HDFS::NameNode RPC latency':'namenode_rpc_latency',
-           'DATANODE::DataNode storage full':'datanodes_storage_full',
-           'JOBTRACKER::JobTracker process down':'jobtracker_process_down',
-           'MAPREDUCE::JobTracker RPC latency':'jobtracker_rpc_latency',
-           'TASKTRACKER::TaskTracker process down':'tasktracker_process_down',
-           'HBASEMASTER::HBase Master process down':'hbasemaster_process_down',
-           'REGIONSERVER::RegionServer process down':'regionserver_process_down',
-           'HBASE::Percent RegionServers down':'regionservers_down',
-           'HIVE-METASTORE::Hive Metastore status check':'hive_metastore_process_down',
-           'ZOOKEEPER::Percent ZooKeeper Servers down':'zookeepers_down',
-           'ZOOKEEPER::ZooKeeper Server process down':'zookeeper_process_down',
-           'OOZIE::Oozie Server status check':'oozie_down',
-           'WEBHCAT::WebHCat Server status check':'templeton_down',
-           'GANGLIA::Ganglia [gmetad] process down':'ganglia_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for HBase Master':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for JobTracker':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for NameNode':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for slaves':'ganglia_collector_process_down',
-           'NAMENODE::Secondary NameNode process down':'secondary_namenode_process_down',
-           'JOBTRACKER::JobTracker CPU utilization':'jobtracker_cpu_utilization',
-           'HBASEMASTER::HBase Master Web UI down':'hbase_ui_down',
-           'NAMENODE::NameNode Web UI down':'namenode_ui_down',
-           'Oozie status check':'oozie_down',
-           'WEBHCAT::WebHcat status check':'templeton_down',
-
-           # Ambari Nagios service check descriptions
-           'DATANODE::DataNode process':'datanode_process',
-           'NAMENODE::NameNode process':'namenode_process',
-           'NAMENODE::Secondary NameNode process':'secondary_namenode_process',
-           'JOURNALNODE::JournalNode process':'journalnode_process',
-           'ZOOKEEPER::ZooKeeper Server process':'zookeeper_server_process',
-           'JOBTRACKER::JobTracker process':'jobtracker_process',
-           'TASKTRACKER::TaskTracker process':'tasktracker_process',
-           'GANGLIA::Ganglia Server process':'ganglia_server_process',
-           'GANGLIA::Ganglia Monitor process for Slaves':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for NameNode':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for JobTracker':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for HBase Master':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for ResourceManager':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for HistoryServer':'ganglia_monitor_process',
-           'HBASEMASTER::HBase Master process':'hbase_master_process',
-           'REGIONSERVER::RegionServer process':'regionserver_process',
-           'NAGIOS::Nagios status log freshness':'nagios_process',
-           'FLUME::Flume Agent process':'flume_agent_process',
-           'OOZIE::Oozie Server status':'oozie_server_process',
-           'HIVE-METASTORE::Hive Metastore status':'hive_metastore_process',
-           'WEBHCAT::WebHCat Server status':'webhcat_server_process',
-           'RESOURCEMANAGER::ResourceManager process':'resourcemanager_process',
-           'NODEMANAGER::NodeManager process':'nodemanager_process',
-           'JOBHISTORY::HistoryServer process':'historyserver_process'}
-
-# Determine the severity of the TVI alert based on the Nagios alert state.
-def determine_severity(state, service):
-    if state in severities:
-        severity = severities[state]
-    else:
-        severity = 'Warning'
-
-    # For some alerts, warning should be converted to Degraded
-    if severity == 'Warning' and service in degraded_alert_services:
-        severity = 'Degraded'
-    elif severity != 'OK' and service in fatal_alert_services:
-        severity = 'Fatal'
-
-    return severity
-
-
-# Determine the msg id for the TVI alert based on the service which generated the Nagios alert.
-# The msg id is used to correlate a log msg to a TVI rule.
-def determine_msg_id(service, severity):
-    if service in msg_ids:
-        msg_id = msg_ids[service]
-        if severity == 'OK':
-            msg_id = '{0}_ok'.format(msg_id)
-
-        return msg_id
-    else:
-        return 'HADOOP_UNKNOWN_MSG'
-
-
-# Determine the domain.  Currently the domain is always 'Hadoop'.
-def determine_domain():
-    return 'Hadoop'
-
-
-# log the TVI msg to the syslog
-def log_tvi_msg(msg):
-    syslog.openlog('nagios', syslog.LOG_PID)
-    syslog.syslog(msg)
-
-
-# generate a tvi log msg from a Hadoop alert
-def generate_tvi_log_msg(alert_type, attempt, state, service, msg):
-    # Determine the TVI msg contents
-    severity = determine_severity(state, service)  # The TVI alert severity.
-    domain   = determine_domain()                  # The domain specified in the TVI alert.
-    msg_id   = determine_msg_id(service, severity) # The msg_id used to correlate to a TVI rule.
-
-    # Only log HARD alerts
-    if alert_type == 'HARD':
-        # Format and log msg
-        log_tvi_msg('{0}: {1}: {2}# {3}'.format(severity, domain, msg_id, msg))
-
-
-# main method which is called when invoked on the command line
-def main():
-    generate_tvi_log_msg(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
-
-
-# run the main method
-if __name__ == '__main__':
-    main()
-    sys.exit(0)
\ No newline at end of file
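
A standalone trace (not part of this commit) of one alert flowing through
the functions above; the host name and message text are invented:

def tvi_line(state, service, msg):
    # Condensed from determine_severity()/determine_msg_id() for a single
    # service, using only mappings present in the dictionaries above.
    severity = {'CRITICAL': 'Critical', 'WARNING': 'Warning', 'OK': 'OK'}.get(state, 'Warning')
    if severity != 'OK' and service in ('NAMENODE::Namenode Process down',
                                        'NAMENODE::NameNode process'):
        severity = 'Fatal'                 # fatal_alert_services promotion
    msg_id = 'namenode_process'            # msg_ids[service]
    if severity == 'OK':
        msg_id = '{0}_ok'.format(msg_id)
    return '{0}: {1}: {2}# {3}'.format(severity, 'Hadoop', msg_id, msg)

print(tvi_line('CRITICAL', 'NAMENODE::NameNode process',
               'Event Host=c6401(CRITICAL), Connection refused'))
# -> Fatal: Hadoop: namenode_process# Event Host=c6401(CRITICAL), Connection refused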

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/functions.py
deleted file mode 100644
index 7252f8f..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/functions.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-# Returns True if the installed Java version is greater than 6
-def is_jdk_greater_6(java64_home):
-  import os
-  import re
-  java_bin = os.path.join(java64_home, 'bin', 'java')
-  ver_check = shell.call([java_bin, '-version'])
-
-  ver = ''
-  if 0 != ver_check[0]:
-    # java is not local, try the home name as a fallback
-    ver = java64_home
-  else:
-    ver = ver_check[1]
-
-  regex = re.compile('"1\.([0-9]*)\.0_([0-9]*)"', re.IGNORECASE)
-  r = regex.search(ver)
-  if r:
-    strs = r.groups()
-    if 2 == len(strs):
-      minor = int(strs[0])
-      if minor > 6:
-        return True
-
-  return False
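
A quick standalone check (not part of this commit) of the banner-parsing
regex above against typical `java -version` output:

import re

regex = re.compile(r'"1\.([0-9]*)\.0_([0-9]*)"', re.IGNORECASE)

for banner in ('java version "1.6.0_31"', 'java version "1.7.0_45"'):
    minor = int(regex.search(banner).group(1))
    print('%s -> greater than 6: %s' % (banner, minor > 6))
# "1.6.0_31" -> False, "1.7.0_45" -> True.  Note the pattern assumes the
# legacy "1.x.0_y" scheme; a JDK 9+ banner such as '9.0.1' would not match,
# and is_jdk_greater_6() would fall through to False.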

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios.py
deleted file mode 100644
index a63ea38..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios.py
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from nagios_server_config import nagios_server_config
-
-def nagios():
-  import params
-
-  File( params.nagios_httpd_config_file,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    content = Template("nagios.conf.j2"),
-    mode   = 0644
-  )
-  
-  Directory( params.conf_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-
-  Directory( [params.plugins_dir, params.nagios_obj_dir])
-
-  Directory( params.nagios_pid_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755,
-    recursive = True
-  )
-
-  Directory( [params.nagios_var_dir, params.check_result_path, params.nagios_rw_dir, params.ambarinagios_php_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    recursive = True
-  )
-  
-  Directory( [params.nagios_log_dir, params.nagios_log_archives_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755
-  )
-
-  nagios_server_config()
-
-  set_web_permissions()
-
-  File( format("{conf_dir}/command.cfg"),
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-
-  File( format("{ambarinagios_php_dir}/{ambarinagios_php_filename}"),
-    content = StaticFile(params.ambarinagios_php_filename),
-  )
-
-  File( params.hdp_mon_nagios_addons_path,
-    content = StaticFile("hdp_mon_nagios_addons.conf"),
-  )
-
-  File(format("{nagios_var_dir}/ignore.dat"),
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0664)
-  
-  if System.get_instance().os_family == "ubuntu":
-    Link(params.ubuntu_stylesheets_desired_location,
-         to = params.ubuntu_stylesheets_real_location
-    )
-  
-  
-def set_web_permissions():
-  import params
-
-  cmd = format("{htpasswd_cmd} -c -b  {conf_dir}/htpasswd.users {nagios_web_login} {nagios_web_password!p}")
-  Execute(cmd)
-
-  File( format("{conf_dir}/htpasswd.users"),
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode  = 0640
-  )
-
-  if System.get_instance().os_family == "suse":
-    command = format("usermod -G {nagios_group} wwwrun")
-  elif System.get_instance().os_family == "ubuntu":
-    command = format("usermod -G {nagios_group} www-data") # note: unlike the redhat branch, -G without -a replaces the user's supplementary groups
-  elif System.get_instance().os_family == "redhat":
-    command = format("usermod -a -G {nagios_group} apache")
-  
-  Execute(command)
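
For orientation (not part of this commit): resource_management's format()
resolves the {placeholders} above from the caller's scope, so the htpasswd
command expands roughly as sketched below. The values here are hypothetical,
and the "!p" conversion appears to mark the password so it can be masked in
logged command lines (an inference from its use here).

conf_dir, htpasswd_cmd = '/etc/nagios', 'htpasswd'
nagios_web_login, nagios_web_password = 'nagiosadmin', 'secret'

cmd = '{0} -c -b {1}/htpasswd.users {2} {3}'.format(
    htpasswd_cmd, conf_dir, nagios_web_login, nagios_web_password)
print(cmd)  # htpasswd -c -b /etc/nagios/htpasswd.users nagiosadmin secret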

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios_server.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios_server.py
deleted file mode 100644
index da35b34..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios_server.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from nagios import nagios
-from nagios_service import nagios_service
-from nagios_service import update_active_alerts
-
-         
-class NagiosServer(Script):
-  def install(self, env):
-    remove_conflicting_packages()
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    nagios()
-
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    update_ignorable(params)
-
-    self.configure(env) # done for updating configs after Security enabled
-    nagios_service(action='start')
-
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    
-    nagios_service(action='stop')
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nagios_pid_file)
-
-    # check for alert structures
-    update_active_alerts()
-
-    
-def remove_conflicting_packages():  
-  Package('hdp_mon_nagios_addons', action = "remove")
-
-  Package('nagios-plugins', action = "remove")
-  
-  if System.get_instance().os_family in ["redhat","suse"]:
-    Execute("rpm -e --allmatches --nopostun nagios",
-      path  = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-      ignore_failures = True)
-
-def update_ignorable(params):
-  if 'passiveInfo' not in params.config:
-    return
-  else:
-    buf = ""
-    count = 0
-    for define in params.config['passiveInfo']:
-      try:
-        host = str(define['host'])
-        service = str(define['service'])
-        component = str(define['component'])
-        buf += host + " " + service + " " + component + "\n"
-        count += 1
-      except KeyError:
-        pass
-
-    f = None
-    try:
-      f = open('/var/nagios/ignore.dat', 'w')
-      f.write(buf)
-      if 1 == count:
-        Logger.info("Persisted '/var/nagios/ignore.dat' with 1 entry")
-      elif count > 1:
-        Logger.info("Persisted '/var/nagios/ignore.dat' with " + str(count) + " entries")
-    except:
-      Logger.info("Could not persist '/var/nagios/ignore.dat'")
-    finally:
-      if f is not None:
-        f.close()
-
-
-if __name__ == "__main__":
-  NagiosServer().execute()
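
A self-contained sketch (not part of this commit) of the passiveInfo ->
ignore.dat transformation that update_ignorable() performs; the entries
are invented:

passive_info = [
    {'host': 'c6401', 'service': 'HDFS', 'component': 'DATANODE'},
    {'host': 'c6402', 'service': 'YARN', 'component': 'NODEMANAGER'},
    {'service': 'incomplete'},          # skipped: KeyError on 'host'
]

buf, count = '', 0
for define in passive_info:
    try:
        buf += '%s %s %s\n' % (define['host'], define['service'], define['component'])
        count += 1
    except KeyError:
        pass

print(buf)    # one "host service component" line per complete entry
print(count)  # 2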

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios_server_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios_server_config.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios_server_config.py
deleted file mode 100644
index 86d5a8a..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios_server_config.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def nagios_server_config():
-  import params
-  
-  nagios_server_configfile( 'nagios.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'resource.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'hadoop-hosts.cfg')
-  nagios_server_configfile( 'hadoop-hostgroups.cfg')
-  nagios_server_configfile( 'hadoop-servicegroups.cfg')
-  nagios_server_configfile( 'hadoop-services.cfg')
-  nagios_server_configfile( 'hadoop-commands.cfg')
-  nagios_server_configfile( 'contacts.cfg')
-  
-  if System.get_instance().os_family != "suse":
-    nagios_server_configfile( 'nagios',
-                              config_dir = '/etc/init.d',
-                              mode = 0755, 
-                              owner = 'root', 
-                              group = 'root'
-    )
-
-  nagios_server_check( 'check_cpu.pl')
-  nagios_server_check( 'check_cpu.php')
-  nagios_server_check( 'check_cpu_ha.php')
-  nagios_server_check( 'check_datanode_storage.php')
-  nagios_server_check( 'check_aggregate.php')
-  nagios_server_check( 'check_hdfs_blocks.php')
-  nagios_server_check( 'check_hdfs_capacity.php')
-  nagios_server_check( 'check_rpcq_latency.php')
-  nagios_server_check( 'check_rpcq_latency_ha.php')
-  nagios_server_check( 'check_webui.sh')
-  nagios_server_check( 'check_webui_ha.sh')
-  nagios_server_check( 'check_name_dir_status.php')
-  nagios_server_check( 'check_oozie_status.sh')
-  nagios_server_check( 'check_templeton_status.sh')
-  nagios_server_check( 'check_hive_metastore_status.sh')
-  nagios_server_check( 'check_hue_status.sh')
-  nagios_server_check( 'check_mapred_local_dir_used.sh')
-  nagios_server_check( 'check_nodemanager_health.sh')
-  nagios_server_check( 'check_namenodes_ha.sh')
-  nagios_server_check( 'check_wrapper.sh')
-  nagios_server_check( 'hdp_nagios_init.php')
-  nagios_server_check( 'check_checkpoint_time.py' )
-  nagios_server_check( 'sys_logger.py' )
-  nagios_server_check( 'check_ambari_alerts.py' )
-
-def nagios_server_configfile(
-  name,
-  owner = None,
-  group = None,
-  config_dir = None,
-  mode = None
-):
-  import params
-  owner = params.nagios_user if not owner else owner
-  group = params.user_group if not group else group
-  config_dir = params.nagios_obj_dir if not config_dir else config_dir
-  
-  TemplateConfig( format("{config_dir}/{name}"),
-    owner          = owner,
-    group          = group,
-    mode           = mode
-  )
-
-def nagios_server_check(name):
-  File( format("{plugins_dir}/{name}"),
-    content = StaticFile(name), 
-    mode = 0755
-  )
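
The keyword-default pattern used by nagios_server_configfile() is worth
calling out; a standalone sketch (not part of this commit), with a
stand-in for the real params module:

class params(object):
    nagios_user, user_group, nagios_obj_dir = 'nagios', 'hadoop', '/etc/nagios/objects'

def configfile(name, owner=None, group=None, config_dir=None):
    # Explicit arguments win; anything omitted falls back to params.
    owner = params.nagios_user if not owner else owner
    group = params.user_group if not group else group
    config_dir = params.nagios_obj_dir if not config_dir else config_dir
    return '%s/%s (owner=%s, group=%s)' % (config_dir, name, owner, group)

print(configfile('hadoop-services.cfg'))
# /etc/nagios/objects/hadoop-services.cfg (owner=nagios, group=hadoop)
print(configfile('nagios', owner='root', group='root', config_dir='/etc/init.d'))
# /etc/init.d/nagios (owner=root, group=root)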


[12/17] ambari git commit: AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py
deleted file mode 100644
index 8eeb181..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from functions import is_jdk_greater_6
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-conf_dir = "/etc/nagios"
-nagios_var_dir = "/var/nagios"
-nagios_rw_dir = "/var/nagios/rw"
-plugins_dir = "/usr/lib64/nagios/plugins"
-nagios_obj_dir = "/etc/nagios/objects"
-check_result_path = "/var/nagios/spool/checkresults"
-nagios_log_dir = "/var/log/nagios"
-nagios_log_archives_dir = format("{nagios_log_dir}/archives")
-nagios_host_cfg = format("{nagios_obj_dir}/hadoop-hosts.cfg")
-nagios_lookup_daemon_str = "/usr/sbin/nagios"
-nagios_pid_dir = status_params.nagios_pid_dir
-nagios_pid_file = status_params.nagios_pid_file
-nagios_resource_cfg = format("{conf_dir}/resource.cfg")
-nagios_hostgroup_cfg = format("{nagios_obj_dir}/hadoop-hostgroups.cfg")
-nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
-nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
-nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
-eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
-nagios_principal_name = default("/configurations/nagios-env/nagios_principal_name", "nagios")
-hadoop_ssl_enabled = False
-
-namenode_metadata_port = get_port_from_url(config['configurations']['core-site']['fs.default.name'])
-oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
-# different from HDP2
-namenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.http.address'])
-# different from HDP2
-snamenode_port = get_port_from_url(config['configurations']['hdfs-site']["dfs.secondary.http.address"])
-
-hbase_master_rpc_port = default('/configurations/hbase-site/hbase.master.port', "60000")
-hs_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.history.server.http.address'])
-journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
-datanode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.datanode.http.address'])
-flume_port = "4159"
-hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
-hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
-templeton_port = config['configurations']['webhcat-site']['templeton.port'] #"50111"
-hbase_master_port = config['configurations']['hbase-site']['hbase.master.info.port'] #"60010"
-hbase_rs_port = config['configurations']['hbase-site']['hbase.regionserver.info.port'] #"60030"
-
-# these four are different for HDP2
-jtnode_port = get_port_from_url(config['configurations']['mapred-site']['mapred.job.tracker.http.address'])
-jobhistory_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.history.server.http.address'])
-tasktracker_port = "50060"
-mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
-
-# this is different for HDP2
-nn_metrics_property = "FSNamesystemMetrics"
-clientPort = config['configurations']['zoo.cfg']['clientPort'] #ZK
-
-
-java64_home = config['hostLevelParams']['java_home']
-check_cpu_on = is_jdk_greater_6(java64_home)
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-nagios_keytab_path = default("/configurations/nagios-env/nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-ganglia_port = "8651"
-ganglia_collector_slaves_port = "8660"
-ganglia_collector_namenode_port = "8661"
-ganglia_collector_jobtracker_port = "8662"
-ganglia_collector_hbase_port = "8663"
-ganglia_collector_rm_port = "8664"
-ganglia_collector_nm_port = "8660"
-ganglia_collector_hs_port = "8666"
-  
-all_ping_ports = config['clusterHostInfo']['all_ping_ports']
-
-if System.get_instance().os_family == "suse":
-  nagios_p1_pl = "/usr/lib/nagios/p1.pl"
-  htpasswd_cmd = "htpasswd2"
-  nagios_httpd_config_file = format("/etc/apache2/conf.d/nagios.conf")
-else:
-  nagios_p1_pl = "/usr/bin/p1.pl"
-  htpasswd_cmd = "htpasswd"
-  nagios_httpd_config_file = format("/etc/httpd/conf.d/nagios.conf")
-  
-nagios_user = config['configurations']['nagios-env']['nagios_user']
-nagios_group = config['configurations']['nagios-env']['nagios_group']
-nagios_web_login = config['configurations']['nagios-env']['nagios_web_login']
-nagios_web_password = config['configurations']['nagios-env']['nagios_web_password']
-user_group = config['configurations']['cluster-env']['user_group']
-nagios_contact = config['configurations']['nagios-env']['nagios_contact']
-
-namenode_host = default("/clusterHostInfo/namenode_host", None)
-_snamenode_host = default("/clusterHostInfo/snamenode_host", None)
-_jtnode_host = default("/clusterHostInfo/jtnode_host", None)
-_slave_hosts = default("/clusterHostInfo/slave_hosts", None)
-_tt_hosts = default("/clusterHostInfo/mapred_tt_hosts", [])
-_journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", None)
-_zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", None)
-_hs_host = default("/clusterHostInfo/hs_host", None)
-_zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", None)
-_flume_hosts = default("/clusterHostInfo/flume_hosts", None)
-_nagios_server_host = default("/clusterHostInfo/nagios_server_host",None)
-_ganglia_server_host = default("/clusterHostInfo/ganglia_server_host",None)
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts",None)
-if type(hbase_master_hosts) is list:
-  hbase_master_hosts_in_str = ','.join(hbase_master_hosts)
-_hive_server_host = default("/clusterHostInfo/hive_server_host",None)
-_oozie_server = default("/clusterHostInfo/oozie_server",None)
-_webhcat_server_host = default("/clusterHostInfo/webhcat_server_host",None)
-# can differ on HDP2
-_mapred_tt_hosts = _tt_hosts
-# if hbase_rs_hosts is not given, region servers are assumed to run on the same nodes as the slaves
-_hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", _slave_hosts)
-_hue_server_host = default("/clusterHostInfo/hue_server_host", None)
-all_hosts = config['clusterHostInfo']['all_hosts']
-
-
-hostgroup_defs = {
-    'namenode' : namenode_host,
-    'snamenode' : _snamenode_host,
-    'slaves' : _slave_hosts,
-    # not in HDP2
-    'tasktracker-servers' : _mapred_tt_hosts,
-    'agent-servers' : all_hosts,
-    'nagios-server' : _nagios_server_host,
-    'jobtracker' : _jtnode_host,
-    'ganglia-server' : _ganglia_server_host,
-    'flume-servers' : _flume_hosts,
-    'zookeeper-servers' : _zookeeper_hosts,
-    'hbasemasters' : hbase_master_hosts,
-    'hiveserver' : _hive_server_host,
-    'region-servers' : _hbase_rs_hosts,
-    'oozie-server' : _oozie_server,
-    'webhcat-server' : _webhcat_server_host,
-    'hue-server' : _hue_server_host,
-    'historyserver2' : _hs_host,
-    'jobhistory': _hs_host,
-    'journalnodes' : _journalnode_hosts
-}
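
get_port_from_url() above comes from resource_management; a rough
functional equivalent (an illustration, not the actual implementation):

try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2, as used by these scripts

def get_port_from_url(url):
    # Accept both bare authorities ('host:8020') and full URLs.
    if '://' not in url:
        url = 'scheme://' + url
    return str(urlparse(url).port)

print(get_port_from_url('hdfs://c6401.ambari.apache.org:8020'))  # 8020
print(get_port_from_url('c6401.ambari.apache.org:50070'))        # 50070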

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/status_params.py
deleted file mode 100644
index 33b35fe..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-nagios_pid_dir = "/var/run/nagios"
-nagios_pid_file = format("{nagios_pid_dir}/nagios.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/contacts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/contacts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/contacts.cfg.j2
deleted file mode 100644
index 610b2bd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/contacts.cfg.j2
+++ /dev/null
@@ -1,109 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-###############################################################################
-# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
-#
-# Last Modified: 05-31-2007
-#
-# NOTES: This config file provides you with some example contact and contact
-#        group definitions that you can reference in host and service
-#        definitions.
-#       
-#        You don't need to keep these definitions in a separate file from your
-#        other object definitions.  This has been done just to make things
-#        easier to understand.
-#
-###############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-
-###############################################################################
-###############################################################################
-#
-# CONTACTS
-#
-###############################################################################
-###############################################################################
-
-# Just one contact defined by default - the Nagios admin (that's you)
-# This contact definition inherits a lot of default values from the 'generic-contact' 
-# template which is defined elsewhere.
-
-define contact{
-        contact_name    {{nagios_web_login}}                                        ; Short name of user
-        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
-        alias           Nagios Admin                                                ; Full name of user
-
-        email           {{nagios_contact}}	; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
-        }
-
-# Contact which writes all Nagios alerts to the system logger.
-define contact{
-        contact_name                    sys_logger         ; Short name of user
-        use                             generic-contact    ; Inherit default values from generic-contact template (defined above)
-        alias                           System Logger      ; Full name of user
-        host_notifications_enabled      1
-        service_notifications_enabled   1
-        service_notification_period     24x7
-        host_notification_period        24x7
-        service_notification_options    w,u,c,r,s
-        host_notification_options       d,u,r,s
-        can_submit_commands             1
-        retain_status_information       1
-        service_notification_commands   service_sys_logger
-        host_notification_commands      host_sys_logger
-        }
-
-###############################################################################
-###############################################################################
-#
-# CONTACT GROUPS
-#
-###############################################################################
-###############################################################################
-
-# We only have one contact in this simple configuration file, so there is
-# no need to create more than one contact group.
-
-define contactgroup {
-        contactgroup_name       admins
-        alias                   Nagios Administrators
-        members                 {{nagios_web_login}},sys_logger
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-commands.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
deleted file mode 100644
index ff13c0d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
+++ /dev/null
@@ -1,147 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-{% if check_cpu_on %}
-# 'check_cpu' check remote cpu load
-define command {
-        command_name    check_cpu
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_cpu.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -u $ARG8$
-       }
-
-define command {
-        command_name    check_cpu_ha
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py or $ARG1$ -- php $USER1$/check_cpu_ha.php -h ^^ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -u $ARG9$
-       }
-{% endif %}
-
-# Check data node storage full 
-define command {
-        command_name    check_datanode_storage
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
-       }
-
-define command{
-        command_name    check_hdfs_blocks
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $ARG1$ -- php $USER1$/check_hdfs_blocks.php -h ^^ -p $ARG2$ -s $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -u $ARG8$
-       }
-
-define command{
-        command_name    check_hdfs_capacity
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $ARG1$ -- php $USER1$/check_hdfs_capacity.php -h ^^ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_aggregate
-        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
-       }
-
-define command{
-        command_name    check_rpcq_latency
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_nagios
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
-       }
-
-define command{
-        command_name    check_webui
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$
-       }
-
-define command{
-        command_name    check_name_dir_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$ -e $ARG2$ -k $ARG3$ -r $ARG4$ -t $ARG5$ -s $ARG6$
-       }
-
-define command{
-        command_name    check_oozie_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_templeton_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_hive_metastore_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-define command{
-        command_name    check_hue_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_hue_status.sh
-       }
-
-define command{
-        command_name    check_mapred_local_dir_used_space
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_mapred_local_dir_used.sh $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name    check_namenodes_ha
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_namenodes_ha.sh $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name    check_nodemanager_health
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_nodemanager_health.sh $HOSTADDRESS$ $ARG1$
-       }
-
-define command{
-        command_name    host_sys_logger
-        command_line    $USER1$/sys_logger.py $HOSTSTATETYPE$ $HOSTATTEMPT$ $HOSTSTATE$ "Host::Ping" "Event Host=$HOSTADDRESS$($HOSTSTATE$), $HOSTOUTPUT$ $LONGHOSTOUTPUT$"
-       }
-
-define command{
-        command_name    service_sys_logger
-        command_line    $USER1$/sys_logger.py $SERVICESTATETYPE$ $SERVICEATTEMPT$ $SERVICESTATE$ "$SERVICEDESC$" "Event Host=$HOSTADDRESS$ Service Description=$SERVICEDESC$($SERVICESTATE$), $SERVICEOUTPUT$ $LONGSERVICEOUTPUT$"
-       }
-
-define command{
-        command_name check_tcp_wrapper
-        command_line /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $HOSTADDRESS$ -- $USER1$/check_tcp -H ^^ -p $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name check_tcp_wrapper_sasl
-        command_line /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- /var/lib/ambari-agent/ambari-python-wrap $USER1$/check_hive_thrift_port.py -H $HOSTADDRESS$ -p $ARG1$ $ARG2$
-       }
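
To make the macro plumbing above concrete (not part of this commit):
Nagios substitutes $USER1$ from resource.cfg and $HOSTADDRESS$/$ARGn$ from
the host and service definitions before running the command, while the
'^^' token appears to be left for mm_wrapper.py itself to replace with
each host it iterates over. With hypothetical values:

macros = {
    '$USER1$': '/usr/lib64/nagios/plugins',
    '$HOSTADDRESS$': 'c6401.ambari.apache.org',
    '$ARG1$': '8670',
    '$ARG2$': '-w 1 -c 1',
}
command_line = ('/var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py '
                'and $HOSTADDRESS$ -- $USER1$/check_tcp -H ^^ -p $ARG1$ $ARG2$')
for macro, value in macros.items():
    command_line = command_line.replace(macro, value)
print(command_line)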

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
deleted file mode 100644
index 544ef71..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
+++ /dev/null
@@ -1,52 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-{% for name, hosts in hostgroup_defs.iteritems() %}
-{% if hosts %}
-define hostgroup {
-        hostgroup_name  {{name}}
-        alias           {{name}}
-        members         {{','.join(hosts)}}
-}
-{% endif %}
-{% endfor %}
-
-define hostgroup {
-        hostgroup_name  all-servers
-        alias           All Servers
-        members         {{','.join(all_hosts)}}
-}
\ No newline at end of file
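
Rendering the loop above with sample data shows the generated Nagios
config; a sketch (not part of this commit) that needs the jinja2 package
and uses items() where the Python 2 template above uses iteritems():

from jinja2 import Template

template = Template('''{% for name, hosts in hostgroup_defs.items() %}
{% if hosts %}define hostgroup {
        hostgroup_name  {{name}}
        alias           {{name}}
        members         {{','.join(hosts)}}
}
{% endif %}{% endfor %}''')

print(template.render(hostgroup_defs={
    'namenode': ['c6401.ambari.apache.org'],
    'slaves': ['c6402.ambari.apache.org', 'c6403.ambari.apache.org'],
    'hue-server': None,   # empty groups are skipped by the {% if %}
}))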

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
deleted file mode 100644
index c05934e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
+++ /dev/null
@@ -1,54 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-{% for host in all_hosts %}
-define host {
-        alias                     {{host}}
-        host_name                 {{host}}
-        use                       linux-server
-        address                   {{host}}
-        check_command             check_tcp_wrapper!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
-        check_interval            0.25
-        retry_interval            0.25
-        max_check_attempts        4
-        notifications_enabled     1
-        first_notification_delay  0     # Send notification soon after change in the hard state
-        notification_interval     0     # Send the notification once
-        notification_options      d,u,r
-}
-
-{% endfor %}
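
The host template above pairs each host with its ping port purely by
position (all_ping_ports[loop.index-1]); in plain Python terms (sample
values, not part of this commit):

all_hosts = ['c6401.ambari.apache.org', 'c6402.ambari.apache.org']
all_ping_ports = ['8670', '8670']

for host, port in zip(all_hosts, all_ping_ports):
    print('%s -> check_tcp_wrapper!%s!-w 1 -c 1' % (host, port))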

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
deleted file mode 100644
index 3833b15..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
+++ /dev/null
@@ -1,105 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% if hostgroup_defs['namenode'] or
-  hostgroup_defs['snamenode']  or
-  hostgroup_defs['slaves'] %}
-define servicegroup {
-  servicegroup_name  HDFS
-  alias  HDFS Checks
-}
-{% endif %}
-{% if hostgroup_defs['jobtracker'] or
-  hostgroup_defs['historyserver2'] %}
-define servicegroup {
-  servicegroup_name  MAPREDUCE
-  alias  MAPREDUCE Checks
-}
-{% endif %}
-
-{% if hostgroup_defs['flume-servers'] %}
-define servicegroup {
-  servicegroup_name  FLUME
-  alias  FLUME Checks
-}
-{% endif %}
-{% if hostgroup_defs['hbasemasters'] %}
-define servicegroup {
-  servicegroup_name  HBASE
-  alias  HBASE Checks
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-define servicegroup {
-  servicegroup_name  OOZIE
-  alias  OOZIE Checks
-}
-{% endif %}
-{% if hostgroup_defs['nagios-server'] %}
-define servicegroup {
-  servicegroup_name  NAGIOS
-  alias  NAGIOS Checks
-}
-{% endif %}
-{% if hostgroup_defs['ganglia-server'] %}
-define servicegroup {
-  servicegroup_name  GANGLIA
-  alias  GANGLIA Checks
-}
-{% endif %}
-{% if hostgroup_defs['hiveserver'] %}
-define servicegroup {
-  servicegroup_name  HIVE
-  alias  HIVE Checks
-}
-{% endif %}
-{% if hostgroup_defs['zookeeper-servers'] %}
-define servicegroup {
-  servicegroup_name  ZOOKEEPER
-  alias  ZOOKEEPER Checks
-}
-{% endif %}
-define servicegroup {
-  servicegroup_name  AMBARI
-  alias  AMBARI Checks
-}
-{% if hostgroup_defs['hue-server'] %}
-define servicegroup {
-  servicegroup_name  HUE
-  alias  HUE Checks
-}
-{% endif %}

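The servicegroup template above is driven entirely by hostgroup_defs: a group is emitted only when at least one of the hostgroups feeding it is non-empty, and AMBARI is defined unconditionally. A small Python sketch of that mapping (hostgroup key names are taken from the template; the host lists are illustrative):

    # Which Nagios servicegroups the deleted template would emit for a layout.
    hostgroup_defs = {
        "namenode": ["c6401"], "snamenode": [], "slaves": ["c6402"],
        "jobtracker": ["c6401"], "historyserver2": [],
        "flume-servers": [], "hbasemasters": ["c6401"],
        "oozie-server": [], "nagios-server": ["c6401"],
        "ganglia-server": ["c6401"], "hiveserver": [],
        "zookeeper-servers": ["c6401"], "hue-server": [],
    }
    conditions = {
        "HDFS": ["namenode", "snamenode", "slaves"],
        "MAPREDUCE": ["jobtracker", "historyserver2"],
        "FLUME": ["flume-servers"], "HBASE": ["hbasemasters"],
        "OOZIE": ["oozie-server"], "NAGIOS": ["nagios-server"],
        "GANGLIA": ["ganglia-server"], "HIVE": ["hiveserver"],
        "ZOOKEEPER": ["zookeeper-servers"], "HUE": ["hue-server"],
    }
    emitted = [sg for sg, groups in conditions.items()
               if any(hostgroup_defs.get(g) for g in groups)]
    emitted.append("AMBARI")  # unconditional in the template
    print(sorted(emitted))
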
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-services.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-services.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-services.cfg.j2
deleted file mode 100644
index c4dcba6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-services.cfg.j2
+++ /dev/null
@@ -1,613 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{# TODO: Look for { or } in created file #}
-# NAGIOS SERVER Check (status log update)
-{% if hostgroup_defs['nagios-server'] %}
-define service {
-        name                            hadoop-service
-        use                             generic-service
-        notification_options            w,u,c,r,f,s
-        first_notification_delay        0
-        notification_interval           0                 # Send the notification once
-        contact_groups                  admins
-        notifications_enabled           1
-        event_handler_enabled           1
-        register                        0
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     NAGIOS::Nagios status log freshness
-        servicegroups           NAGIOS
-        check_command           check_nagios!10!/var/nagios/status.dat!{{nagios_lookup_daemon_str}}
-        normal_check_interval   5
-        retry_check_interval    0.5
-        max_check_attempts      2
-}
-
-# NAGIOS SERVER HDFS Checks
-{% if hostgroup_defs['namenode'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes with space available
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode space"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-# AMBARI AGENT Checks
-{% for hostname in all_hosts %}
-define service {
-        host_name               {{ hostname }}
-        use                     hadoop-service
-        service_description     AMBARI::Ambari Agent process
-        servicegroups           AMBARI
-        check_command           check_tcp_wrapper!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% endfor %}
-
-# NAGIOS SERVER ZOOKEEPER Checks
-{% if hostgroup_defs['zookeeper-servers'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     ZOOKEEPER::Percent ZooKeeper Servers live
-        servicegroups           ZOOKEEPER
-        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process"!35%!70%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-# NAGIOS SERVER HBASE Checks
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASE::Percent RegionServers live
-        servicegroups           HBASE
-        check_command           check_aggregate!"REGIONSERVER::RegionServer process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-# GANGLIA SERVER Checks
-{% if hostgroup_defs['ganglia-server'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Server process
-        servicegroups           GANGLIA
-        _host_component         GANGLIA_SERVER
-        check_command           check_tcp_wrapper!{{ ganglia_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% if hostgroup_defs['namenode'] %}
-{% for hostname in hostgroup_defs['namenode'] %}
-define service {
-        host_name               {{ hostname }}
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for NameNode
-        servicegroups           GANGLIA
-        _host_component         GANGLIA_MONITOR
-        check_command           check_tcp_wrapper!{{ ganglia_collector_namenode_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endfor %}
-{% endif %}
-
-{% if hostgroup_defs['jobtracker'] %}
-{% for hostname in hostgroup_defs['jobtracker'] %}
-define service {
-        host_name               {{ hostname }}
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for JobTracker
-        servicegroups           GANGLIA
-        _host_component         GANGLIA_MONITOR
-        check_command           check_tcp_wrapper!{{ ganglia_collector_jobtracker_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endfor %}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] %}
-{% for hostname in hostgroup_defs['hbasemasters'] %}
-define service {
-        host_name               {{ hostname }}
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HBase Master
-        servicegroups           GANGLIA
-        _host_component         GANGLIA_MONITOR
-        check_command           check_tcp_wrapper!{{ ganglia_collector_hbase_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endfor %}
-{% endif %}
-
-
-{% if hostgroup_defs['historyserver2'] %}
-{% for hostname in hostgroup_defs['historyserver2'] %}
-define service {
-        host_name               {{ hostname }}
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HistoryServer
-        servicegroups           GANGLIA
-        _host_component         GANGLIA_MONITOR
-        check_command           check_tcp_wrapper!{{ ganglia_collector_hs_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endfor %}
-{% endif %}
-
-{% endif %}
-
-{% if hostgroup_defs['snamenode'] %}
-# Secondary namenode checks
-define service {
-        hostgroup_name          snamenode
-        use                     hadoop-service
-        service_description     NAMENODE::Secondary NameNode process
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{ snamenode_port }}!-w 1 -c 1
-        _host_component         SECONDARY_NAMENODE
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-
-{% if hostgroup_defs['namenode'] %}
-# HDFS Checks
-{%  for namenode_hostname in namenode_host %}
-{# TODO: check if we can get rid of str, lower #}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode edit logs directory status on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_name_dir_status!{{ namenode_port }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         NAMENODE
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if check_cpu_on %}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode host CPU utilization on {{ namenode_hostname }}
-        servicegroups           HDFS
-#        check_command           check_cpu!200%!250%
-        check_command           check_cpu!{{ namenode_port }}!200%!250%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         NAMENODE
-        normal_check_interval   5
-        retry_check_interval    2
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode Web UI on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_webui!namenode!{{ namenode_port }}
-        _host_component         NAMENODE
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode process on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{ namenode_metadata_port }}!-w 1 -c 1
-        _host_component         NAMENODE
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     HDFS::NameNode RPC latency on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_rpcq_latency!NameNode!{{ namenode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         NAMENODE
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      5
-}
-
-{%  endfor  %}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Blocks health
-        servicegroups           HDFS
-        check_command           check_hdfs_blocks!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!{{ nn_metrics_property }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         DATANODE
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::HDFS capacity utilization
-        servicegroups           HDFS
-        check_command           check_hdfs_capacity!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!80%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         DATANODE
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-{% endif %}
-
-# MAPREDUCE Checks
-{% if hostgroup_defs['jobtracker'] %}
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!jobtracker!{{ jtnode_port }}
-        _host_component         JOBTRACKER
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          jobhistory
-        use                     hadoop-service
-        service_description     JOBTRACKER::HistoryServer Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!jobhistory!{{ jobhistory_port }}
-        _host_component         HISTORYSERVER
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% if check_cpu_on %}
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker CPU utilization
-        servicegroups           MAPREDUCE
-        check_command           check_cpu!{{ jtnode_port }}!200%!250%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         JOBTRACKER
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp_wrapper!{{ jtnode_port }}!-w 1 -c 1
-        _host_component         JOBTRACKER
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     MAPREDUCE::JobTracker RPC latency
-        servicegroups           MAPREDUCE
-        check_command           check_rpcq_latency!JobTracker!{{ jtnode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         JOBTRACKER
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-{% if hostgroup_defs['tasktracker-servers'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     MAPREDUCE::Percent TaskTrackers live
-        servicegroups           MAPREDUCE
-        check_command           check_aggregate!"TASKTRACKER::TaskTracker process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-# MAPREDUCE::TASKTRACKER Checks 
-define service {
-        hostgroup_name          tasktracker-servers
-        use                     hadoop-service
-        service_description     TASKTRACKER::TaskTracker process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp_wrapper!{{ tasktracker_port }}!-w 1 -c 1
-        _host_component         TASKTRACKER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-# MAPREDUCE::TASKTRACKER MapReduce local dir used space
-define service {
-        hostgroup_name          tasktracker-servers
-        use                     hadoop-service
-        service_description     TASKTRACKER::MapReduce local dir space
-        servicegroups           MAPREDUCE
-        check_command           check_mapred_local_dir_used_space!{{ mapred_local_dir }}!85%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-{% endif %}
-{% endif %}
-
-
-{% if hostgroup_defs['slaves'] %}
-# HDFS::DATANODE Checks
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode process
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{datanode_port}}!-w 1 -c 1
-        _host_component         DATANODE
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode space
-        servicegroups           HDFS
-        check_command           check_datanode_storage!{{ datanode_port }}!90%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         DATANODE
-        normal_check_interval   2
-        retry_check_interval    1
-        max_check_attempts      2
-}
-
-{% endif %}
-
-{% if hostgroup_defs['flume-servers'] %}
-# FLUME Checks
-define service {
-        hostgroup_name          flume-servers
-        use                     hadoop-service
-        service_description     FLUME::Flume Agent process
-        servicegroups           FLUME
-        check_command           check_tcp_wrapper!{{ flume_port }}!-w 1 -c 1
-        _host_component         FLUME
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-
-{% if hostgroup_defs['zookeeper-servers'] %}
-# ZOOKEEPER Checks
-define service {
-        hostgroup_name          zookeeper-servers
-        use                     hadoop-service
-        service_description     ZOOKEEPER::ZooKeeper Server process
-        servicegroups           ZOOKEEPER
-        check_command           check_tcp_wrapper!{{ clientPort }}!-w 1 -c 1
-        _host_component         ZOOKEEPER_SERVER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] and hostgroup_defs['region-servers'] != None %}
-# HBASE::REGIONSERVER Checks
-define service {
-        hostgroup_name          region-servers
-        use                     hadoop-service
-        service_description     REGIONSERVER::RegionServer process
-        servicegroups           HBASE
-        check_command           check_tcp_wrapper!{{ hbase_rs_port }}!-w 1 -c 1
-        _host_component         HBASE_REGIONSERVER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if hostgroup_defs['hbasemasters'] %}
-{% if check_cpu_on %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master CPU utilization
-        servicegroups           HBASE
-        check_command           check_cpu_ha!{{ hbase_master_hosts_in_str }}!{{ hbase_master_port }}!200%!250%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        _host_component         HBASE_MASTER
-        normal_check_interval   5
-        retry_check_interval    2
-        max_check_attempts      5
-}
-{%  endif %}
-{%  endif %}
-
-{%  for hbasemaster in hbase_master_hosts %}
-define service {
-        host_name               {{ hbasemaster }}
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master process on {{ hbasemaster }}
-        servicegroups           HBASE
-        check_command           check_tcp_wrapper!{{ hbase_master_rpc_port }}!-w 1 -c 1
-        _host_component         HBASE_MASTER
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endfor %}
-{% endif %}
-
-{% if hostgroup_defs['hiveserver'] %}
-# HIVE Metastore check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-METASTORE::Hive Metastore process
-        servicegroups           HIVE
-        check_command           check_tcp_wrapper!{{ hive_metastore_port }}!-w 1 -c 1
-        _host_component         HIVE_METASTORE
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-# HIVE Server check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-SERVER::HiveServer2 process
-        servicegroups           HIVE
-        check_command           check_tcp_wrapper_sasl!{{ hive_server_port }}!{{ '--security-enabled' if security_enabled else '' }}!-w 1 -c 1
-        _host_component         HIVE_SERVER
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-# Oozie check
-define service {
-        hostgroup_name          oozie-server
-        use                     hadoop-service
-        service_description     OOZIE::Oozie Server status
-        servicegroups           OOZIE
-        {% if security_enabled %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!false
-        {% endif %}
-        _host_component         OOZIE_SERVER
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-# WEBHCAT check
-define service {
-        hostgroup_name          webhcat-server
-        use                     hadoop-service
-        service_description     WEBHCAT::WebHCat Server status
-        servicegroups           HIVE
-        {% if security_enabled %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!false
-        {% endif %}
-        _host_component         WEBHCAT_SERVER
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hue-server'] %}
-define service {
-        hostgroup_name          hue-server
-        use                     hadoop-service
-        service_description     HUE::Hue Server status
-        servicegroups           HUE
-        check_command           check_hue_status
-        _host_component         HUE
-        normal_check_interval   100
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-

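The cluster-level checks in this template lean on two conventions worth noting: Nagios separates check_command arguments with '!', and the check_aggregate plugin rolls up a named per-host check (for example "DATANODE::DataNode process") into warn/critical thresholds expressed as a percentage of affected hosts. A hedged Python sketch of that aggregation logic, assuming the thresholds mean "percent of member hosts whose named check is failing", as the 10%/30% arguments above suggest; the real plugin reads live Nagios state:

    # Sketch of the check_aggregate!<desc>!<warn%>!<crit%> rollup seen above.
    # States follow Nagios conventions: 0=OK, 1=WARNING, 2=CRITICAL.
    def aggregate_status(per_host_states, warn_pct=10.0, crit_pct=30.0):
        total = len(per_host_states)
        affected = sum(1 for state in per_host_states if state != 0)
        pct = 100.0 * affected / total if total else 0.0
        if pct >= crit_pct:
            return 2, "CRITICAL: total:<%d>, affected:<%d>" % (total, affected)
        if pct >= warn_pct:
            return 1, "WARNING: total:<%d>, affected:<%d>" % (total, affected)
        return 0, "OK: total:<%d>, affected:<%d>" % (total, affected)

    # "DataNode process" failing on 1 of 4 slaves -> 25%, i.e. a WARNING
    # under the 10%/30% thresholds used by "Percent DataNodes live".
    print(aggregate_status([0, 0, 2, 0]))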

[03/17] ambari git commit: AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/api_testscripts/curl-setup-multiple-hbase-master.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/api_testscripts/curl-setup-multiple-hbase-master.sh b/ambari-server/src/test/resources/api_testscripts/curl-setup-multiple-hbase-master.sh
index bf29e0d..ddbd8cd 100644
--- a/ambari-server/src/test/resources/api_testscripts/curl-setup-multiple-hbase-master.sh
+++ b/ambari-server/src/test/resources/api_testscripts/curl-setup-multiple-hbase-master.sh
@@ -32,11 +32,11 @@ echo AGENT_HOST2=$AGENT_HOST2
 echo '########### Create cluster ###########'
 curl -i -X POST -d "{\"Clusters\": {\"version\" : \"HDP-1.2.0\"}}" http://$SERVER_HOST:8080/api/v1/clusters/c1
 echo '########### Set service names ###########'
-curl -i -X POST -d "[{\"ServiceInfo\":{\"service_name\":\"HDFS\"}},{\"ServiceInfo\":{\"service_name\":\"MAPREDUCE\"}},{\"ServiceInfo\":{\"service_name\":\"NAGIOS\"}},{\"ServiceInfo\":{\"service_name\":\"GANGLIA\"}},{\"ServiceInfo\":{\"service_name\":\"HBASE\"}},{\"ServiceInfo\":{\"service_name\":\"ZOOKEEPER\"}}]" http://$SERVER_HOST:8080/api/v1/clusters/c1/services
+curl -i -X POST -d "[{\"ServiceInfo\":{\"service_name\":\"HDFS\"}},{\"ServiceInfo\":{\"service_name\":\"MAPREDUCE\"}},{\"ServiceInfo\":{\"service_name\":\"GANGLIA\"}},{\"ServiceInfo\":{\"service_name\":\"HBASE\"}},{\"ServiceInfo\":{\"service_name\":\"ZOOKEEPER\"}}]" http://$SERVER_HOST:8080/api/v1/clusters/c1/services
 echo '########### Create configs ###########'
 
 echo '########### Create global config ###########'
-curl -i -X POST -d "{\"type\":\"global\",\"tag\":\"version1\",\"properties\":{\"dfs_name_dir\":\"/hadoop/hdfs/namenode\",\"fs_checkpoint_dir\":\"/hadoop/hdfs/namesecondary\",\"dfs_data_dir\":\"/hadoop/hdfs/data\",\"hdfs_log_dir_prefix\":\"/var/log/hadoop\",\"hadoop_pid_dir_prefix\":\"/var/run/hadoop\",\"dfs_webhdfs_enabled\":false,\"hadoop_heapsize\":\"1024\",\"namenode_heapsize\":\"1024m\",\"namenode_opt_newsize\":\"200m\",\"namenode_opt_maxnewsize\":\"640m\",\"namenode_opt_permsize\":\"128m\",\"namenode_opt_maxpermsize\":\"256m\",\"datanode_du_reserved\":\"1\",\"dtnode_heapsize\":\"1024m\",\"dfs_datanode_failed_volume_tolerated\":\"0\",\"fs_checkpoint_period\":\"21600\",\"fs_checkpoint_size\":\"0.5\",\"dfs_exclude\":\"dfs.exclude\",\"dfs_include\":\"dfs.include\",\"dfs_replication\":\"3\",\"dfs_block_local_path_access_user\":\"hbase\",\"dfs_datanode_data_dir_perm\":\"750\",\"security_enabled\":false,\"kerberos_domain\":\"EXAMPLE.COM\",\"kadmin_pw\":\"\",\"keytab_path\":\"/etc/secu
 rity/keytabs\",\"namenode_formatted_mark_dir\":\"/var/run/hadoop/hdfs/namenode/formatted/\",\"hcat_conf_dir\":\"\",\"mapred_local_dir\":\"/hadoop/mapred\",\"mapred_system_dir\":\"/mapred/system\",\"scheduler_name\":\"org.apache.hadoop.mapred.CapacityTaskScheduler\",\"jtnode_opt_newsize\":\"200m\",\"jtnode_opt_maxnewsize\":\"200m\",\"jtnode_heapsize\":\"1024m\",\"mapred_map_tasks_max\":\"4\",\"mapred_red_tasks_max\":\"2\",\"mapred_cluster_map_mem_mb\":\"-1\",\"mapred_cluster_red_mem_mb\":\"-1\",\"mapred_cluster_max_map_mem_mb\":\"-1\",\"mapred_cluster_max_red_mem_mb\":\"-1\",\"mapred_job_map_mem_mb\":\"-1\",\"mapred_job_red_mem_mb\":\"-1\",\"mapred_child_java_opts_sz\":\"768\",\"io_sort_mb\":\"200\",\"io_sort_spill_percent\":\"0.9\",\"mapreduce_userlog_retainhours\":\"24\",\"maxtasks_per_job\":\"-1\",\"lzo_enabled\":false,\"snappy_enabled\":true,\"rca_enabled\":true,\"mapred_hosts_exclude\":\"mapred.exclude\",\"mapred_hosts_include\":\"mapred.include\",\"mapred_jobstatus_dir\":\"file
 :////mapred/jobstatus\",\"task_controller\":\"org.apache.hadoop.mapred.DefaultTaskController\",\"hbase_log_dir\":\"/var/log/hbase\",\"hbase_pid_dir\":\"/var/run/hbase\",\"hbase_regionserver_heapsize\":\"1024m\",\"hbase_regionserver_xmn_max\":\"512\",\"hbase_regionserver_xmn_ratio\":\"0.2\",\"hbase_master_heapsize\":\"1024m\",\"hstore_compactionthreshold\":\"3\",\"hfile_blockcache_size\":\"0.25\",\"hstorefile_maxsize\":\"1073741824\",\"regionserver_handlers\":\"30\",\"hregion_majorcompaction\":\"86400000\",\"hregion_blockmultiplier\":\"2\",\"hregion_memstoreflushsize\":\"134217728\",\"client_scannercaching\":\"100\",\"zookeeper_sessiontimeout\":\"60000\",\"hfile_max_keyvalue_size\":\"10485760\",\"hbase_hdfs_root_dir\":\"/apps/hbase/data\",\"hbase_tmp_dir\":\"/var/log/hbase\",\"hdfs_enable_shortcircuit_read\":true,\"hdfs_enable_shortcircuit_skipchecksum\":false,\"hdfs_support_append\":true,\"hstore_blockingstorefiles\":7,\"regionserver_memstore_lab\":true,\"regionserver_memstore_lower
 limit\":\"0.35\",\"regionserver_memstore_upperlimit\":\"0.4\",\"zk_data_dir\":\"/hadoop/zookeeper\",\"zk_log_dir\":\"/var/log/zookeeper\",\"zk_pid_dir\":\"/var/run/zookeeper\",\"zk_pid_file\":\"/var/run/zookeeper/zookeeper_server.pid\",\"tickTime\":\"2000\",\"initLimit\":\"10\",\"syncLimit\":\"5\",\"clientPort\":\"2181\",\"nagios_user\":\"nagios\",\"nagios_group\":\"nagios\",\"nagios_web_login\":\"nagiosadmin\",\"nagios_web_password\":\"password\",\"nagios_contact\":\"x@x.x\",\"hbase_conf_dir\":\"/etc/hbase\",\"proxyuser_group\":\"users\",\"dfs_datanode_address\":\"50010\",\"dfs_datanode_http_address\":\"50075\",\"apache_artifacts_download_url\":\"\",\"ganglia_runtime_dir\":\"/var/run/ganglia/hdp\",\"gmetad_user\":\"nobody\",\"gmond_user\":\"nobody\",\"ganglia_shell_cmds_dir\":\"/usr/libexec/hdp/ganglia\",\"webserver_group\":\"apache\",\"java64_home\":\"/usr/jdk/jdk1.6.0_31\",\"run_dir\":\"/var/run/hadoop\",\"hadoop_conf_dir\":\"/etc/hadoop\",\"hcat_metastore_port\":\"/usr/lib/hcata
 log/share/hcatalog\",\"hcat_lib\":\"/usr/lib/hcatalog/share/hcatalog\",\"hcat_dbroot\":\"/usr/lib/hcatalog/share/hcatalog\",\"hdfs_user\":\"hdfs\",\"mapred_user\":\"mapred\",\"hbase_user\":\"hbase\",\"hive_user\":\"hive\",\"hcat_user\":\"hcat\",\"webhcat_user\":\"hcat\",\"oozie_user\":\"oozie\",\"oozie_conf_dir\":\"/etc/oozie\",\"pig_conf_dir\":\"/etc/pig\",\"pig_user\":\"pig\",\"sqoop_conf_dir\":\"/etc/sqoop\",\"sqoop_lib\":\"/usr/lib/sqoop/lib/\",\"sqoop_user\":\"sqoop\",\"zk_user\":\"zookeeper\",\"user_group\":\"hadoop\",\"zk_conf_dir\":\"/etc/conf/\"}}" http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
+curl -i -X POST -d "{\"type\":\"global\",\"tag\":\"version1\",\"properties\":{\"dfs_name_dir\":\"/hadoop/hdfs/namenode\",\"fs_checkpoint_dir\":\"/hadoop/hdfs/namesecondary\",\"dfs_data_dir\":\"/hadoop/hdfs/data\",\"hdfs_log_dir_prefix\":\"/var/log/hadoop\",\"hadoop_pid_dir_prefix\":\"/var/run/hadoop\",\"dfs_webhdfs_enabled\":false,\"hadoop_heapsize\":\"1024\",\"namenode_heapsize\":\"1024m\",\"namenode_opt_newsize\":\"200m\",\"namenode_opt_maxnewsize\":\"640m\",\"namenode_opt_permsize\":\"128m\",\"namenode_opt_maxpermsize\":\"256m\",\"datanode_du_reserved\":\"1\",\"dtnode_heapsize\":\"1024m\",\"dfs_datanode_failed_volume_tolerated\":\"0\",\"fs_checkpoint_period\":\"21600\",\"fs_checkpoint_size\":\"0.5\",\"dfs_exclude\":\"dfs.exclude\",\"dfs_include\":\"dfs.include\",\"dfs_replication\":\"3\",\"dfs_block_local_path_access_user\":\"hbase\",\"dfs_datanode_data_dir_perm\":\"750\",\"security_enabled\":false,\"kerberos_domain\":\"EXAMPLE.COM\",\"kadmin_pw\":\"\",\"keytab_path\":\"/etc/secu
 rity/keytabs\",\"namenode_formatted_mark_dir\":\"/var/run/hadoop/hdfs/namenode/formatted/\",\"hcat_conf_dir\":\"\",\"mapred_local_dir\":\"/hadoop/mapred\",\"mapred_system_dir\":\"/mapred/system\",\"scheduler_name\":\"org.apache.hadoop.mapred.CapacityTaskScheduler\",\"jtnode_opt_newsize\":\"200m\",\"jtnode_opt_maxnewsize\":\"200m\",\"jtnode_heapsize\":\"1024m\",\"mapred_map_tasks_max\":\"4\",\"mapred_red_tasks_max\":\"2\",\"mapred_cluster_map_mem_mb\":\"-1\",\"mapred_cluster_red_mem_mb\":\"-1\",\"mapred_cluster_max_map_mem_mb\":\"-1\",\"mapred_cluster_max_red_mem_mb\":\"-1\",\"mapred_job_map_mem_mb\":\"-1\",\"mapred_job_red_mem_mb\":\"-1\",\"mapred_child_java_opts_sz\":\"768\",\"io_sort_mb\":\"200\",\"io_sort_spill_percent\":\"0.9\",\"mapreduce_userlog_retainhours\":\"24\",\"maxtasks_per_job\":\"-1\",\"lzo_enabled\":false,\"snappy_enabled\":true,\"rca_enabled\":true,\"mapred_hosts_exclude\":\"mapred.exclude\",\"mapred_hosts_include\":\"mapred.include\",\"mapred_jobstatus_dir\":\"file
 :////mapred/jobstatus\",\"task_controller\":\"org.apache.hadoop.mapred.DefaultTaskController\",\"hbase_log_dir\":\"/var/log/hbase\",\"hbase_pid_dir\":\"/var/run/hbase\",\"hbase_regionserver_heapsize\":\"1024m\",\"hbase_regionserver_xmn_max\":\"512\",\"hbase_regionserver_xmn_ratio\":\"0.2\",\"hbase_master_heapsize\":\"1024m\",\"hstore_compactionthreshold\":\"3\",\"hfile_blockcache_size\":\"0.25\",\"hstorefile_maxsize\":\"1073741824\",\"regionserver_handlers\":\"30\",\"hregion_majorcompaction\":\"86400000\",\"hregion_blockmultiplier\":\"2\",\"hregion_memstoreflushsize\":\"134217728\",\"client_scannercaching\":\"100\",\"zookeeper_sessiontimeout\":\"60000\",\"hfile_max_keyvalue_size\":\"10485760\",\"hbase_hdfs_root_dir\":\"/apps/hbase/data\",\"hbase_tmp_dir\":\"/var/log/hbase\",\"hdfs_enable_shortcircuit_read\":true,\"hdfs_enable_shortcircuit_skipchecksum\":false,\"hdfs_support_append\":true,\"hstore_blockingstorefiles\":7,\"regionserver_memstore_lab\":true,\"regionserver_memstore_lower
 limit\":\"0.35\",\"regionserver_memstore_upperlimit\":\"0.4\",\"zk_data_dir\":\"/hadoop/zookeeper\",\"zk_log_dir\":\"/var/log/zookeeper\",\"zk_pid_dir\":\"/var/run/zookeeper\",\"zk_pid_file\":\"/var/run/zookeeper/zookeeper_server.pid\",\"tickTime\":\"2000\",\"initLimit\":\"10\",\"syncLimit\":\"5\",\"clientPort\":\"2181\",\"hbase_conf_dir\":\"/etc/hbase\",\"proxyuser_group\":\"users\",\"dfs_datanode_address\":\"50010\",\"dfs_datanode_http_address\":\"50075\",\"apache_artifacts_download_url\":\"\",\"ganglia_runtime_dir\":\"/var/run/ganglia/hdp\",\"gmetad_user\":\"nobody\",\"gmond_user\":\"nobody\",\"ganglia_shell_cmds_dir\":\"/usr/libexec/hdp/ganglia\",\"webserver_group\":\"apache\",\"java64_home\":\"/usr/jdk/jdk1.6.0_31\",\"run_dir\":\"/var/run/hadoop\",\"hadoop_conf_dir\":\"/etc/hadoop\",\"hcat_metastore_port\":\"/usr/lib/hcatalog/share/hcatalog\",\"hcat_lib\":\"/usr/lib/hcatalog/share/hcatalog\",\"hcat_dbroot\":\"/usr/lib/hcatalog/share/hcatalog\",\"hdfs_user\":\"hdfs\",\"mapred_us
 er\":\"mapred\",\"hbase_user\":\"hbase\",\"hive_user\":\"hive\",\"hcat_user\":\"hcat\",\"webhcat_user\":\"hcat\",\"oozie_user\":\"oozie\",\"oozie_conf_dir\":\"/etc/oozie\",\"pig_conf_dir\":\"/etc/pig\",\"pig_user\":\"pig\",\"sqoop_conf_dir\":\"/etc/sqoop\",\"sqoop_lib\":\"/usr/lib/sqoop/lib/\",\"sqoop_user\":\"sqoop\",\"zk_user\":\"zookeeper\",\"user_group\":\"hadoop\",\"zk_conf_dir\":\"/etc/conf/\"}}" http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
 echo '########### Create core-site config ###########'
 curl -i -X POST -d "{\"type\":\"core-site\",\"tag\":\"version1\",\"properties\":{\"io.file.buffer.size\":\"131072\",\"io.serializations\":\"org.apache.hadoop.io.serializer.WritableSerialization\",\"io.compression.codec.lzo.class\":\"com.hadoop.compression.lzo.LzoCodec\",\"fs.trash.interval\":\"360\",\"ipc.client.idlethreshold\":\"8000\",\"ipc.client.connection.maxidletime\":\"30000\",\"ipc.client.connect.max.retries\":\"50\",\"webinterface.private.actions\":\"false\",\"fs.default.name\":\"hdfs://1.ambari.ua:8020\",\"fs.checkpoint.dir\":\"/hadoop/hdfs/namesecondary\",\"fs.checkpoint.period\":\"21600\",\"fs.checkpoint.size\":\"0.5\",\"fs.checkpoint.edits.dir\":\"/hadoop/hdfs/namesecondary\"}}" http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
 echo '########### Create hdfs-site config ###########'
@@ -49,8 +49,6 @@ echo '########### Attach configs to HDFS ###########'
 curl -i -X PUT -d "{\"config\":{\"global\":\"version1\",\"core-site\":\"version1\",\"hdfs-site\":\"version1\"}}" http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HDFS
 echo '########### Attach configs to MAPREDUCE ###########'
 curl -i -X PUT -d "{\"config\":{\"global\":\"version1\",\"core-site\":\"version1\",\"mapred-site\":\"version1\"}}" http://$SERVER_HOST:8080/api/v1/clusters/c1/services/MAPREDUCE
-echo '########### Attach configs to NAGIOS ###########'
-curl -i -X PUT -d "{\"config\":{\"global\":\"version1\"}}" http://$SERVER_HOST:8080/api/v1/clusters/c1/services/NAGIOS
 echo '########### Attach configs to GANGLIA ###########'
 curl -i -X PUT -d "{\"config\":{\"global\":\"version1\"}}" http://$SERVER_HOST:8080/api/v1/clusters/c1/services/GANGLIA
 echo '########### Attach configs to HBASE ###########'
@@ -61,8 +59,6 @@ echo '########### Add components to HDFS ###########'
 curl -i -X POST -d "{\"components\":[{\"ServiceComponentInfo\":{\"component_name\":\"NAMENODE\"}},{\"ServiceComponentInfo\":{\"component_name\":\"SECONDARY_NAMENODE\"}},{\"ServiceComponentInfo\":{\"component_name\":\"DATANODE\"}},{\"ServiceComponentInfo\":{\"component_name\":\"HDFS_CLIENT\"}}]}" http://$SERVER_HOST:8080/api/v1/clusters/c1/services?ServiceInfo/service_name=HDFS
 echo '########### Add components to MAPREDUCE ###########'
 curl -i -X POST -d "{\"components\":[{\"ServiceComponentInfo\":{\"component_name\":\"JOBTRACKER\"}},{\"ServiceComponentInfo\":{\"component_name\":\"TASKTRACKER\"}},{\"ServiceComponentInfo\":{\"component_name\":\"MAPREDUCE_CLIENT\"}}]}" http://$SERVER_HOST:8080/api/v1/clusters/c1/services?ServiceInfo/service_name=MAPREDUCE
-echo '########### Add components to NAGIOS ###########'
-curl -i -X POST -d "{\"components\":[{\"ServiceComponentInfo\":{\"component_name\":\"NAGIOS_SERVER\"}}]}" http://$SERVER_HOST:8080/api/v1/clusters/c1/services?ServiceInfo/service_name=NAGIOS
 echo '########### Add components to GANGLIA ###########'
 curl -i -X POST -d "{\"components\":[{\"ServiceComponentInfo\":{\"component_name\":\"GANGLIA_SERVER\"}},{\"ServiceComponentInfo\":{\"component_name\":\"GANGLIA_MONITOR\"}}]}" http://$SERVER_HOST:8080/api/v1/clusters/c1/services?ServiceInfo/service_name=GANGLIA
 echo '########### Add components to HBASE ###########'
@@ -76,7 +72,6 @@ echo '########### Deploy components to hosts ###########'
 curl -i -X POST -d "{\"host_components\":[{\"HostRoles\":{\"component_name\":\"NAMENODE\"}}]}" http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts?Hosts/host_name=$AGENT_HOST1
 curl -i -X POST -d "{\"host_components\":[{\"HostRoles\":{\"component_name\":\"SECONDARY_NAMENODE\"}}]}" http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts?Hosts/host_name=$AGENT_HOST2
 curl -i -X POST -d "{\"host_components\":[{\"HostRoles\":{\"component_name\":\"JOBTRACKER\"}}]}" http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts?Hosts/host_name=$AGENT_HOST2
-curl -i -X POST -d "{\"host_components\":[{\"HostRoles\":{\"component_name\":\"NAGIOS_SERVER\"}}]}" http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts?Hosts/host_name=$AGENT_HOST1
 curl -i -X POST -d "{\"host_components\":[{\"HostRoles\":{\"component_name\":\"GANGLIA_SERVER\"}}]}" http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts?Hosts/host_name=$AGENT_HOST1
 curl -i -X POST -d "{\"host_components\":[{\"HostRoles\":{\"component_name\":\"HBASE_MASTER\"}}]}" "http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts?Hosts/host_name=$AGENT_HOST1|Hosts/host_name=$AGENT_HOST2"
 curl -i -X POST -d "{\"host_components\":[{\"HostRoles\":{\"component_name\":\"ZOOKEEPER_SERVER\"}}]}" http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts?Hosts/host_name=$AGENT_HOST1

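With the NAGIOS service, component, and host assignment gone from this script, the c1 test cluster it builds should expose no NAGIOS entries through the REST API it exercises. A quick standard-library sketch to verify that; the endpoint mirrors the curl calls above, while the admin:admin credentials are the Ambari defaults and an assumption here:

    # Assumed sanity check: list the services the script registered and make
    # sure NAGIOS is no longer among them. Credentials are the stock defaults.
    import base64, json, urllib.request

    req = urllib.request.Request("http://localhost:8080/api/v1/clusters/c1/services")
    req.add_header("Authorization",
                   "Basic " + base64.b64encode(b"admin:admin").decode("ascii"))
    with urllib.request.urlopen(req) as resp:
        names = [item["ServiceInfo"]["service_name"]
                 for item in json.load(resp)["items"]]
    assert "NAGIOS" not in names, "NAGIOS service is still registered"
    print(names)  # e.g. ['GANGLIA', 'HBASE', 'HDFS', 'MAPREDUCE', 'ZOOKEEPER']
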
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/deploy_HDP2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/deploy_HDP2.sh b/ambari-server/src/test/resources/deploy_HDP2.sh
index f9bed8a..e9a9396 100644
--- a/ambari-server/src/test/resources/deploy_HDP2.sh
+++ b/ambari-server/src/test/resources/deploy_HDP2.sh
@@ -19,7 +19,7 @@ curl -i -X POST -d '{"Clusters": {"version" : "HDP-2.0.1"}}' -u admin:admin http
 echo "-----------------------Cluster-----------------------"
 curl -i -X POST -d '[{"ServiceInfo":{"service_name":"HDFS"}},{"ServiceInfo":{"service_name":"YARN"}}, {"ServiceInfo":{"service_name":"MAPREDUCEv2"}}, {"ServiceInfo":{"service_name":"TEZ"}}]' -u admin:admin http://localhost:8080/api/v1/clusters/c1/services
 echo "-----------------------Services-----------------------"
-curl -i -X PUT -d '{"Clusters":{"desired_configs":{"type":"global","tag":"version1","properties":{"hive_lib":"/usr/lib/hive/lib/","hadoop_heapsize":"1024","hive_log_dir":"/var/log/hive","dfs_datanode_http_address":"50075","zk_user":"zookeeper","dfs_data_dir":"/grid/0/hadoop/hdfs/data,/grid/1/hadoop/hdfs/data","clientPort":"2181","hdfs_user":"hdfs","hive_dbroot":"/usr/lib/hive/lib","dfs_name_dir":"/grid/0/hadoop/hdfs/namenode,/grid/1/hadoop/hdfs/namenode","hfile_blockcache_size":"0.25","kadmin_pw":"","gmond_user":"nobody","hregion_majorcompaction":"86400000","user_group":"hadoop","dfs_datanode_failed_volume_tolerated":"0","zk_pid_file":"/var/run/zookeeper/zookeeper_server.pid","zookeeper_sessiontimeout":"60000","hcat_user":"hcat","zk_log_dir":"/var/log/zookeeper","oozie_derby_database":"Derby","hbase_conf_dir":"/etc/hbase","oozie_data_dir":"/grid/0/hadoop/oozie/data","dfs_replication":"3","fs_checkpoint_period":"21600","hive_database_type":"mysql","hbase_hdfs_root_dir":"/apps/hbase/d
 ata","run_dir":"/var/run/hadoop","oozie_log_dir":"/var/log/oozie","hbase_pid_dir":"/var/run/hbase","hive_user":"hive","security_enabled":"false","dfs_datanode_address":"50010","dfs_block_local_path_access_user":"hbase","dfs_datanode_data_dir_perm":"750","nagios_web_password":"admin","dtnode_heapsize":"1024m","tickTime":"2000","oozie_database":"New Derby Database","regionserver_memstore_upperlimit":"0.4","datanode_du_reserved":"1","hbase_tmp_dir":"/var/log/hbase","java64_home":"/usr/jdk/jdk1.6.0_31","oozie_user":"oozie","hive_metastore_port":"9083","namenode_heapsize":"1024m","nagios_contact":"a@d.m","oozie_JPAService_url":"${oozie.data.dir}/${oozie.db.schema.name}-db;create\u003dtrue","hive_ambari_database":"MySQL","oozie_jdbc_driver":"org.apache.derby.jdbc.EmbeddedDriver","zk_data_dir":"/grid/0/hadoop/zookeeper","hive_pid_dir":"/var/run/hive","mysql_connector_url":"${download_url}/mysql-connector-java-5.1.18.zip","hregion_blockmultiplier":"2","oozie_pid_dir":"/var/run/oozie","gmeta
 d_user":"nobody","oozie_metastore_user_name":"oozie","hive_metastore_user_passwd":"admin","hcat_log_dir":"/var/log/webhcat","hive_hostname":"HOST","syncLimit":"5","mapred_user":"mapred","fs_checkpoint_size":"0.5","initLimit":"10","hive_database":"New MySQL Database","hive_jdbc_driver":"com.mysql.jdbc.Driver","hive_conf_dir":"/etc/hive/conf","hdfs_log_dir_prefix":"/var/log/hadoop","keytab_path":"/etc/security/keytabs","proxyuser_group":"users","hive_database_name":"hive","client_scannercaching":"100","hcat_pid_dir":"/var/run/webhcat","hdfs_enable_shortcircuit_read":"true","kerberos_domain":"EXAMPLE.COM","nagios_group":"nagios","hdfs_support_append":"true","nagios_web_login":"nagiosadmin","oozie_database_type":"derby","namenode_formatted_mark_dir":"/var/run/hadoop/hdfs/namenode/formatted/","dfs_exclude":"dfs.exclude","namenode_opt_maxnewsize":"640m","oozie_database_name":"oozie","regionserver_memstore_lab":"true","namenode_opt_newsize":"200m","namenode_opt_permsize":"128m","namenode_o
 pt_maxpermsize":"256m","smokeuser":"ambari-qa","nagios_user":"nagios","hcat_conf_dir":"","regionserver_memstore_lowerlimit":"0.35","apache_artifacts_download_url":"","hive_metastore_user_name":"hive","hstore_blockingstorefiles":"7","hadoop_conf_dir":"/etc/hadoop","oozie_metastore_user_passwd":"admin","hbase_user":"hbase","ganglia_runtime_dir":"/var/run/ganglia/hdp","fs_checkpoint_dir":"/grid/0/hadoop/hdfs/namesecondary","zk_pid_dir":"/var/run/zookeeper","hfile_max_keyvalue_size":"10485760","hive_aux_jars_path":"/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar","hstore_compactionthreshold":"3","hregion_memstoreflushsize":"134217728","hadoop_pid_dir_prefix":"/var/run/hadoop","hbase_log_dir":"/var/log/hbase","webhcat_user":"hcat","regionserver_handlers":"30","hbase_regionserver_heapsize":"1024m","hbase_regionserver_xmn_max":"512","hbase_regionserver_xmn_ratio":"0.2","dfs_include":"dfs.include","dfs_webhdfs_enabled":"true","rrdcached_base_dir":"/var/lib/ganglia/rrds","hstorefile_maxsi
 ze":"1073741824","hbase_master_heapsize":"1024m"}}}}' -u admin:admin http://localhost:8080/api/v1/clusters/c1
+curl -i -X PUT -d '{"Clusters":{"desired_configs":{"type":"global","tag":"version1","properties":{"hive_lib":"/usr/lib/hive/lib/","hadoop_heapsize":"1024","hive_log_dir":"/var/log/hive","dfs_datanode_http_address":"50075","zk_user":"zookeeper","dfs_data_dir":"/grid/0/hadoop/hdfs/data,/grid/1/hadoop/hdfs/data","clientPort":"2181","hdfs_user":"hdfs","hive_dbroot":"/usr/lib/hive/lib","dfs_name_dir":"/grid/0/hadoop/hdfs/namenode,/grid/1/hadoop/hdfs/namenode","hfile_blockcache_size":"0.25","kadmin_pw":"","gmond_user":"nobody","hregion_majorcompaction":"86400000","user_group":"hadoop","dfs_datanode_failed_volume_tolerated":"0","zk_pid_file":"/var/run/zookeeper/zookeeper_server.pid","zookeeper_sessiontimeout":"60000","hcat_user":"hcat","zk_log_dir":"/var/log/zookeeper","oozie_derby_database":"Derby","hbase_conf_dir":"/etc/hbase","oozie_data_dir":"/grid/0/hadoop/oozie/data","dfs_replication":"3","fs_checkpoint_period":"21600","hive_database_type":"mysql","hbase_hdfs_root_dir":"/apps/hbase/d
 ata","run_dir":"/var/run/hadoop","oozie_log_dir":"/var/log/oozie","hbase_pid_dir":"/var/run/hbase","hive_user":"hive","security_enabled":"false","dfs_datanode_address":"50010","dfs_block_local_path_access_user":"hbase","dfs_datanode_data_dir_perm":"750","dtnode_heapsize":"1024m","tickTime":"2000","oozie_database":"New Derby Database","regionserver_memstore_upperlimit":"0.4","datanode_du_reserved":"1","hbase_tmp_dir":"/var/log/hbase","java64_home":"/usr/jdk/jdk1.6.0_31","oozie_user":"oozie","hive_metastore_port":"9083","namenode_heapsize":"1024m","oozie_JPAService_url":"${oozie.data.dir}/${oozie.db.schema.name}-db;create\u003dtrue","hive_ambari_database":"MySQL","oozie_jdbc_driver":"org.apache.derby.jdbc.EmbeddedDriver","zk_data_dir":"/grid/0/hadoop/zookeeper","hive_pid_dir":"/var/run/hive","mysql_connector_url":"${download_url}/mysql-connector-java-5.1.18.zip","hregion_blockmultiplier":"2","oozie_pid_dir":"/var/run/oozie","gmetad_user":"nobody","oozie_metastore_user_name":"oozie","h
 ive_metastore_user_passwd":"admin","hcat_log_dir":"/var/log/webhcat","hive_hostname":"HOST","syncLimit":"5","mapred_user":"mapred","fs_checkpoint_size":"0.5","initLimit":"10","hive_database":"New MySQL Database","hive_jdbc_driver":"com.mysql.jdbc.Driver","hive_conf_dir":"/etc/hive/conf","hdfs_log_dir_prefix":"/var/log/hadoop","keytab_path":"/etc/security/keytabs","proxyuser_group":"users","hive_database_name":"hive","client_scannercaching":"100","hcat_pid_dir":"/var/run/webhcat","hdfs_enable_shortcircuit_read":"true","kerberos_domain":"EXAMPLE.COM","hdfs_support_append":"true","oozie_database_type":"derby","namenode_formatted_mark_dir":"/var/run/hadoop/hdfs/namenode/formatted/","dfs_exclude":"dfs.exclude","namenode_opt_maxnewsize":"640m","oozie_database_name":"oozie","regionserver_memstore_lab":"true","namenode_opt_newsize":"200m","namenode_opt_permsize":"128m","namenode_opt_maxpermsize":"256m","smokeuser":"ambari-qa","hcat_conf_dir":"","regionserver_memstore_lowerlimit":"0.35","apa
 che_artifacts_download_url":"","hive_metastore_user_name":"hive","hstore_blockingstorefiles":"7","hadoop_conf_dir":"/etc/hadoop","oozie_metastore_user_passwd":"admin","hbase_user":"hbase","ganglia_runtime_dir":"/var/run/ganglia/hdp","fs_checkpoint_dir":"/grid/0/hadoop/hdfs/namesecondary","zk_pid_dir":"/var/run/zookeeper","hfile_max_keyvalue_size":"10485760","hive_aux_jars_path":"/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar","hstore_compactionthreshold":"3","hregion_memstoreflushsize":"134217728","hadoop_pid_dir_prefix":"/var/run/hadoop","hbase_log_dir":"/var/log/hbase","webhcat_user":"hcat","regionserver_handlers":"30","hbase_regionserver_heapsize":"1024m","hbase_regionserver_xmn_max":"512","hbase_regionserver_xmn_ratio":"0.2","dfs_include":"dfs.include","dfs_webhdfs_enabled":"true","rrdcached_base_dir":"/var/lib/ganglia/rrds","hstorefile_maxsize":"1073741824","hbase_master_heapsize":"1024m"}}}}' -u admin:admin http://localhost:8080/api/v1/clusters/c1
 echo "-----------------------Global configs-----------------------"
 curl -i -X PUT -d `echo '{"Clusters":{"desired_configs":{"type":"core-site","tag":"version1","properties":{"io.file.buffer.size":"131072","io.serializations":"org.apache.hadoop.io.serializer.WritableSerialization","io.compression.codecs":"","io.compression.codec.lzo.class":"com.hadoop.compression.lzo.LzoCodec","fs.default.name":"hdfs://HOST:8020","fs.trash.interval":"360","fs.checkpoint.dir":"","fs.checkpoint.edits.dir":"${fs.checkpoint.dir}","fs.checkpoint.period":"21600","fs.checkpoint.size":"536870912","ipc.client.idlethreshold":"8000","ipc.client.connection.maxidletime":"30000","ipc.client.connect.max.retries":"50","webinterface.private.actions":"false","hadoop.security.authentication":"","hadoop.security.authorization":"","hadoop.security.auth_to_local":""}}}}' | sed s/HOST/$HOST/g` -u admin:admin http://localhost:8080/api/v1/clusters/c1
 echo "-----------------------core-site-----------------------"

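The only delta in this deploy_HDP2.sh payload is that the five nagios_* globals drop out of desired_configs; every other property is carried over verbatim. A tiny sketch that makes the removed keys explicit by diffing trimmed before/after dicts (values copied from the script above):

    # The nagios_* keys removed from the global desired_configs payload.
    before = {
        "nagios_web_password": "admin", "nagios_contact": "a@d.m",
        "nagios_group": "nagios", "nagios_web_login": "nagiosadmin",
        "nagios_user": "nagios", "hadoop_heapsize": "1024",  # ...kept keys
    }
    after = {"hadoop_heapsize": "1024"}  # ...same non-Nagios keys as before

    print(sorted(set(before) - set(after)))
    # ['nagios_contact', 'nagios_group', 'nagios_user',
    #  'nagios_web_login', 'nagios_web_password']
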
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/gsInstaller-hosts.txt
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/gsInstaller-hosts.txt b/ambari-server/src/test/resources/gsInstaller-hosts.txt
index 7d4b74c..1bd85f4 100644
--- a/ambari-server/src/test/resources/gsInstaller-hosts.txt
+++ b/ambari-server/src/test/resources/gsInstaller-hosts.txt
@@ -32,4 +32,3 @@ OOZIE OOZIE_CLIENT ip-10-190-97-104.ec2.internal
 OOZIE OOZIE_SERVER ip-10-8-113-183.ec2.internal
 GANGLIA GANGLIA ip-10-190-97-104.ec2.internal
 GANGLIA GANGLIA_MONITOR ip-10-190-97-104.ec2.internal
-NAGIOS NAGIOS_SERVER ip-10-190-97-104.ec2.internal

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/nagios_alerts.txt
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/nagios_alerts.txt b/ambari-server/src/test/resources/nagios_alerts.txt
deleted file mode 100644
index e5f04d6..0000000
--- a/ambari-server/src/test/resources/nagios_alerts.txt
+++ /dev/null
@@ -1,605 +0,0 @@
-{
-    "alerts": [
-        {
-            "service_description": "Ambari Agent process",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.000 second response time on port 8670",
-            "last_hard_state_change": "1389125372",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288692",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288692",
-            "service_type": "AMBARI"
-        },
-        {
-            "service_description": "Ganglia Monitor process for HBase Master",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.032 second response time on port 8663",
-            "last_hard_state_change": "1389125380",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288714",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288714",
-            "service_type": "GANGLIA"
-        },
-        {
-            "service_description": "Ganglia Monitor process for JobTracker",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.001 second response time on port 8662",
-            "last_hard_state_change": "1389125389",
-            "long_plugin_output": "AMBARIPASSIVE=2\n",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288714",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288714",
-            "service_type": "GANGLIA"
-        },
-        {
-            "service_description": "Ganglia Monitor process for NameNode",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.001 second response time on port 8661",
-            "last_hard_state_change": "1389125397",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288714",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288714",
-            "service_type": "GANGLIA"
-        },
-        {
-            "service_description": "Ganglia Server process",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.000 second response time on port 8651",
-            "last_hard_state_change": "1389125406",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288714",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288714",
-            "service_type": "GANGLIA"
-        },
-        {
-            "service_description": "Percent RegionServers live",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "3",
-            "current_state": "2",
-            "plugin_output": "CRITICAL: total:&lt;1&gt;, affected:&lt;1&gt;",
-            "last_hard_state_change": "1389145364",
-            "last_hard_state": "2",
-            "last_time_ok": "1389057717",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "1389288711",
-            "is_flapping": "0",
-            "last_check": "1389288711",
-            "service_type": "HBASE"
-        },
-        {
-            "service_description": "HBase Master CPU utilization on c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "1 CPU, load 7.0% &lt; 200% : OK",
-            "last_hard_state_change": "",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288443",
-            "last_time_warning": "0",
-            "last_time_unknown": "1389034859",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288443",
-            "service_type": "HBASE"
-        },
-        {
-            "service_description": "HBase Master process on c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "4",
-            "current_state": "2",
-            "plugin_output": "Connection refused",
-            "last_hard_state_change": "1389158776",
-            "last_hard_state": "2",
-            "last_time_ok": "1389100994",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "1389288714",
-            "is_flapping": "0",
-            "last_check": "1389288714",
-            "service_type": "HBASE"
-        },
-        {
-            "service_description": "Blocks health",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "OK: corrupt_blocks:&lt;0&gt;, missing_blocks:&lt;0&gt;, total_blocks:&lt;17&gt;",
-            "last_hard_state_change": "1389125440",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288640",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288640",
-            "service_type": "HDFS"
-        },
-        {
-            "service_description": "HDFS capacity utilization",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "OK: DFSUsedGB:&lt;0&gt;, DFSTotalGB:&lt;462.4&gt;",
-            "last_hard_state_change": "1389125448",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288648",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288648",
-            "service_type": "HDFS"
-        },
-        {
-            "service_description": "NameNode RPC latency on c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "OK: RpcQueueTime_avg_time:&lt;0&gt; Secs, RpcProcessingTime_avg_time:&lt;0&gt; Secs",
-            "last_hard_state_change": "1389125457",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288664",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288664",
-            "service_type": "HDFS"
-        },
-        {
-            "service_description": "Percent DataNodes live",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "OK: total:&lt;1&gt;, affected:&lt;0&gt;",
-            "last_hard_state_change": "1389125465",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288714",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288714",
-            "service_type": "HDFS"
-        },
-        {
-            "service_description": "Percent DataNodes with space available",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "OK: total:&lt;1&gt;, affected:&lt;0&gt;",
-            "last_hard_state_change": "1389125375",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288684",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288684",
-            "service_type": "HDFS"
-        },
-        {
-            "service_description": "Percent TaskTrackers live",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "OK: total:&lt;1&gt;, affected:&lt;0&gt;",
-            "last_hard_state_change": "1389125383",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288703",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288703",
-            "service_type": "MAPREDUCE"
-        },
-        {
-            "service_description": "Nagios status log freshness",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "NAGIOS OK: 5 processes, status log updated 8 seconds ago",
-            "last_hard_state_change": "1389125392",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288592",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288592",
-            "service_type": "NAGIOS"
-        },
-        {
-            "service_description": "NameNode Web UI on c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "OK: Successfully accessed namenode Web UI",
-            "last_hard_state_change": "1389125400",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288720",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288720",
-            "service_type": "HDFS"
-        },
-        {
-            "service_description": "NameNode edit logs directory status on c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "OK: All NameNode directories are active",
-            "last_hard_state_change": "1389125409",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288714",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288714",
-            "service_type": "HDFS"
-        },
-        {
-            "service_description": "NameNode host CPU utilization on c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "1 CPU, load 7.0% &lt; 200% : OK",
-            "last_hard_state_change": "1389125417",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288437",
-            "last_time_warning": "0",
-            "last_time_unknown": "1389034853",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288437",
-            "service_type": "HDFS"
-        },
-        {
-            "service_description": "NameNode process on c6401.ambari.apache.org",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.001 second response time on port 8020",
-            "last_hard_state_change": "1389125426",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288714",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288714",
-            "service_type": "HDFS"
-        },
-        {
-            "service_description": "Percent ZooKeeper Servers live",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "OK: total:&lt;3&gt;, affected:&lt;0&gt;",
-            "last_hard_state_change": "1389125434",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288714",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288714",
-            "service_type": "ZOOKEEPER"
-        },
-        {
-            "service_description": "ZooKeeper Server process",
-            "host_name": "c6401.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.004 second response time on port 2181",
-            "last_hard_state_change": "1389125443",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288703",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288703",
-            "service_type": "ZOOKEEPER"
-        },
-        {
-            "service_description": "Ambari Agent process",
-            "host_name": "c6402.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.000 second response time on port 8670",
-            "last_hard_state_change": "1389125451",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288711",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288711",
-            "service_type": "AMBARI"
-        },
-        {
-            "service_description": "HistoryServer Web UI",
-            "host_name": "c6402.ambari.apache.org",
-            "current_attempt": "3",
-            "current_state": "1",
-            "plugin_output": "WARNING: HistoryServer Web UI not accessible : http://c6402.ambari.apache.org:51111/jobhistoryhome.jsp",
-            "last_hard_state_change": "1389125580",
-            "last_hard_state": "1",
-            "last_time_ok": "0",
-            "last_time_warning": "1389288720",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288720",
-            "service_type": "MAPREDUCE"
-        },
-        {
-            "service_description": "JobTracker CPU utilization",
-            "host_name": "c6402.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "1 CPU, load 2.0% &lt; 200% : OK",
-            "last_hard_state_change": "1389125468",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288668",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288668",
-            "service_type": "MAPREDUCE"
-        },
-        {
-            "service_description": "JobTracker Web UI",
-            "host_name": "c6402.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "OK: Successfully accessed jobtracker Web UI",
-            "last_hard_state_change": "1389125378",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288684",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288684",
-            "service_type": "MAPREDUCE"
-        },
-        {
-            "service_description": "JobTracker process",
-            "host_name": "c6402.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.000 second response time on port 50030",
-            "last_hard_state_change": "1389125386",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288713",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288713",
-            "service_type": "MAPREDUCE"
-        },
-        {
-            "service_description": "JobTracker RPC latency",
-            "host_name": "c6402.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "OK: RpcQueueTime_avg_time:&lt;0.09&gt; Secs, RpcProcessingTime_avg_time:&lt;0.03&gt; Secs",
-            "last_hard_state_change": "1389125395",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288595",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288595",
-            "service_type": "MAPREDUCE"
-        },
-        {
-            "service_description": "Secondary NameNode process",
-            "host_name": "c6402.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.001 second response time on port 50090",
-            "last_hard_state_change": "1389125403",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288723",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288723",
-            "service_type": "HDFS"
-        },
-        {
-            "service_description": "ZooKeeper Server process",
-            "host_name": "c6402.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.000 second response time on port 2181",
-            "last_hard_state_change": "1389125412",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288672",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288672",
-            "service_type": "ZOOKEEPER"
-        },
-        {
-            "service_description": "MapReduce local dir space",
-            "host_name": "c6403.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "OK: MapReduce local dir space is available.",
-            "last_hard_state_change": "1389125420",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288714",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288714",
-            "service_type": "UNKNOWN"
-        },
-        {
-            "service_description": "Ambari Agent process",
-            "host_name": "c6403.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.001 second response time on port 8670",
-            "last_hard_state_change": "1389125428",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288688",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288688",
-            "service_type": "AMBARI"
-        },
-        {
-            "service_description": "DataNode process",
-            "host_name": "c6403.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.027 second response time on port 50075",
-            "last_hard_state_change": "1389125437",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288684",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288684",
-            "service_type": "HDFS"
-        },
-        {
-            "service_description": "DataNode space",
-            "host_name": "c6403.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "OK: Capacity:[524208947200], Remaining Capacity:[496455069696], percent_full:[5.2944303320735]",
-            "last_hard_state_change": "1389125445",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288645",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288645",
-            "service_type": "HDFS"
-        },
-        {
-            "service_description": "RegionServer process",
-            "host_name": "c6403.ambari.apache.org",
-            "current_attempt": "3",
-            "current_state": "2",
-            "plugin_output": "Connection refused",
-            "last_hard_state_change": "1389145384",
-            "last_hard_state": "2",
-            "last_time_ok": "1389057677",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "1389288664",
-            "is_flapping": "0",
-            "last_check": "1389288664",
-            "service_type": "HBASE"
-        },
-        {
-            "service_description": "TaskTracker process",
-            "host_name": "c6403.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.001 second response time on port 50060",
-            "last_hard_state_change": "1389125462",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288722",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288722",
-            "service_type": "MAPREDUCE"
-        },
-        {
-            "service_description": "ZooKeeper Server process",
-            "host_name": "c6403.ambari.apache.org",
-            "current_attempt": "1",
-            "current_state": "0",
-            "plugin_output": "TCP OK - 0.000 second response time on port 2181",
-            "last_hard_state_change": "1389125471",
-            "last_hard_state": "0",
-            "last_time_ok": "1389288671",
-            "last_time_warning": "0",
-            "last_time_unknown": "0",
-            "last_time_critical": "0",
-            "is_flapping": "0",
-            "last_check": "1389288671",
-            "service_type": "ZOOKEEPER"
-        },
-        {
-          "service_description" : "Hive Metastore status",
-          "host_name" : "c6404.ambari.apache.org",
-          "current_attempt" : "1",
-          "current_state" : "0",
-          "plugin_output" : "CRITICAL: Error accessing Hive Metastore status [Exception in thread &quot;main&quot; java.lang.RuntimeException: java.lang.RuntimeException: Unable to instantiate org.apache.hadoop.hive.metastore.HiveMetaStoreClient",
-          "last_hard_state_change" : "1392736649",
-          "last_hard_state" : "0",
-          "last_time_ok" : "1392736799",
-          "last_time_warning" : "0",
-          "last_time_unknown" : "0",
-          "last_time_critical" : "1392736619",
-          "is_flapping" : "0",
-          "last_check" : "1392736799",
-          "long_plugin_output" : "at org.apache.hadoop.hive.ql.session.SessionState.start(SessionState.java:319)\\nat org.apache.hive.hcatalog.cli.HCatCli.main(HCatCli.java:138)\\nat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\\nat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)\\nat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)\\nat java.lang.reflect.Method.invoke(Method.java:597)\\nat org.apache.hadoop.util.RunJar.main(RunJar.java:212)\\nCaused by: java.lang.RuntimeException: Unable to instantiate org.apache.hadoop.hive.metastore.HiveMetaStoreClient\\nat org.apache.hadoop.hive.metastore.MetaStoreUtils.newInstance(MetaStoreUtils.java:1345)\\nat org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.&lt;init&gt;(RetryingMetaStoreClient.java:62)\\nat org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.getProxy(RetryingMetaStoreClient.java:72)\\nat org.apache.hadoop.hive.ql.metadata.Hive.creat
 eMetaStoreClient(Hive.java:2434)\\nat org.apache.hadoop.hive.ql.metadata.Hive.getMSC(Hive.java:2446)\\nat org.apache.hadoop.hive.ql.session.SessionState.start(SessionState.java:313)\\n... 6 more\\nCaused by: java.lang.reflect.InvocationTargetException\\nat sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)\\nat sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:39)\\nat sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:27)\\nat java.lang.reflect.Constructor.newInstance(Constructor.java:513)\\nat org.apache.hadoop.hive.metastore.MetaStoreUtils.newInstance(MetaStoreUtils.java:1343)\\n... 11 more\\nCaused by: MetaException(message:Could not connect to meta store using any of the URIs provided. Most recent failure: org.apache.thrift.transport.TTransportException: java.net.ConnectException: Connection refused\\nat org.apache.thrift.transport.TSocket.open(TSocket.java:185)\\nat org.apac
 he.hadoop.hive.metastore.HiveMetaStoreClient.open(HiveMetaStoreClient.java:300)\\nat org.apache.hadoop.hive.metastore.HiveMetaStoreClient.&lt;init&gt;(HiveMetaStoreClient.java:181)\\nat sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)\\nat sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:39)\\nat sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:27)\\nat java.lang.reflect.Constructor.newInstance(Constructor.java:513)\\nat org.apache.hadoop.hive.metastore.MetaStoreUtils.newInstance(MetaStoreUtils.java:1343)\\nat org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.&lt;init&gt;(RetryingMetaStoreClient.java:62)\\nat org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.getProxy(RetryingMetaStoreClient.java:72)\\nat org.apache.hadoop.hive.ql.metadata.Hive.createMetaStoreClient(Hive.java:2434)\\nat org.apache.hadoop.hive.ql.metadata.Hive.getMSC(Hive.java:2446)\\nat org.apach
 e.hadoop.hive.ql.session.SessionState.start(SessionState.java:313)\\nat org.apache.hive.hcatalog.cli.HCatCli.main(HCatCli.java:138)\\nat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\\nat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)\\nat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)\\nat java.lang.reflect.Method.invoke(Method.java:597)\\nat org.apache.hadoop.util.RunJar.main(RunJar.java:212)\\nCaused by: java.net.ConnectException: Connection refused\\nat java.net.PlainSocketImpl.socketConnect(Native Method)\\nat java.net.PlainSocketImpl.doConnect(PlainSocketImpl.java:351)\\nat java.net.PlainSocketImpl.connectToAddress(PlainSocketImpl.java:213)\\nat java.net.PlainSocketImpl.connect(PlainSocketImpl.java:200)\\nat java.net.SocksSocketImpl.connect(SocksSocketImpl.java:366)\\nat java.net.Socket.connect(Socket.java:529)\\nat org.apache.thrift.transport.TSocket.open(TSocket.java:180)\\n... 18 more\\n)\\n
 at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.open(HiveMetaStoreClient.java:346)\\nat org.apache.hadoop.hive.metastore.HiveMetaStoreClient.&lt;init&gt;(HiveMetaStoreClient.java:181)\\n... 16 more]\\nAMBARIPASSIVE=2\\n",
-          "service_type" : "HIVE"
-        }
-    ],
-    "hostcounts": {
-        "up_hosts": "4",
-        "down_hosts": "0"
-    },
-    "servicestates": {
-        "PUPPET": "0"
-    }
-}
\ No newline at end of file

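The deleted fixture is a single JSON document: an "alerts" array plus "hostcounts" and "servicestates" summaries. Each alert's "current_state" follows the standard Nagios plugin return codes (0 OK, 1 WARNING, 2 CRITICAL, 3 UNKNOWN), which is consistent with the OK/WARNING/CRITICAL plugin_output values above. A minimal sketch, assuming the file has been restored locally, that tallies alerts by service type and state:

  import json
  from collections import Counter

  # Standard Nagios plugin return codes.
  STATE_NAMES = {"0": "OK", "1": "WARNING", "2": "CRITICAL", "3": "UNKNOWN"}

  with open("nagios_alerts.txt") as handle:
      data = json.load(handle)

  tally = Counter((alert["service_type"],
                   STATE_NAMES.get(alert["current_state"], "UNKNOWN"))
                  for alert in data["alerts"])
  for (service, state), count in sorted(tally.items()):
      print("%-10s %-8s %d" % (service, state, count))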
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/NAGIOS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/NAGIOS/metainfo.xml
deleted file mode 100644
index 4b9872f..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,95 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.2.3</version>
-
-      <components>
-        <component>
-          <name>NAGIOS_SERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/nagios_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>perl</name>
-            </package>
-            <package>
-              <name>perl-Net-SNMP</name>
-            </package>
-            <package>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <name>fping</name>
-            </package>
-            <package>
-              <name>hdp_mon_nagios_addons</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse</osFamily>
-          <package>
-            <name>php5-json</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>centos5</osFamily>
-          <package>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5</osFamily>
-          <package>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>oraclelinux5</osFamily>
-          <package>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>

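The NAGIOS metainfo.xml files deleted in this change all share the same shape: a service entry with NAGIOS_SERVER as its single MASTER component plus per-osFamily package lists. A short ElementTree sketch, assuming one of the deleted files is on disk as metainfo.xml, that prints the packages per OS family:

  import xml.etree.ElementTree as ET

  root = ET.parse("metainfo.xml").getroot()
  for service in root.iter("service"):
      print(service.findtext("name"), service.findtext("version"))
      for os_specific in service.iter("osSpecific"):
          family = os_specific.findtext("osFamily")
          packages = [pkg.findtext("name") for pkg in os_specific.iter("package")]
          print("  %s: %s" % (family, ", ".join(packages)))

Recursive iter("package") matters here: as the XML above shows, the "any" family wraps its entries in a <packages> element while the suse/centos5/redhat5/oraclelinux5 families place a bare <package> directly under <osSpecific>.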
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/configuration/global.xml
deleted file mode 100644
index 61a2b90..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/configuration/global.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>nagios_user</name>
-    <value>nagios</value>
-    <description>Nagios Username.</description>
-  </property>
-  <property>
-    <name>nagios_group</name>
-    <value>nagios</value>
-    <description>Nagios Group.</description>
-  </property>
-  <property>
-    <name>nagios_web_login</name>
-    <value>nagiosadmin</value>
-    <description>Nagios web user.</description>
-  </property>
-  <property>
-    <name>nagios_web_password</name>
-    <value></value>
-    <description>Nagios Admin Password.</description>
-  </property>
-  <property>
-    <name>nagios_contact</name>
-    <value></value>
-    <description>Hadoop Admin Email.</description>
-  </property>
-
-</configuration>

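The configuration file deleted above uses Ambari's standard <property> name/value/description triples. A companion sketch, under the same assumption that the file is restored locally, that loads them into a dict:

  import xml.etree.ElementTree as ET

  root = ET.parse("global.xml").getroot()
  nagios_globals = {prop.findtext("name"): prop.findtext("value")
                    for prop in root.iter("property")}
  print(nagios_globals)  # e.g. nagios_user -> "nagios", nagios_web_password -> ""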
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/metainfo.xml
deleted file mode 100644
index 4b9872f..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,95 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.2.3</version>
-
-      <components>
-        <component>
-          <name>NAGIOS_SERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/nagios_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>perl</name>
-            </package>
-            <package>
-              <name>perl-Net-SNMP</name>
-            </package>
-            <package>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <name>fping</name>
-            </package>
-            <package>
-              <name>hdp_mon_nagios_addons</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse</osFamily>
-          <package>
-            <name>php5-json</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>centos5</osFamily>
-          <package>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5</osFamily>
-          <package>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>oraclelinux5</osFamily>
-          <package>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/NAGIOS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/NAGIOS/metainfo.xml
deleted file mode 100644
index 4b9872f..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,95 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.2.3</version>
-
-      <components>
-        <component>
-          <name>NAGIOS_SERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/nagios_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>perl</name>
-            </package>
-            <package>
-              <name>perl-Net-SNMP</name>
-            </package>
-            <package>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <name>fping</name>
-            </package>
-            <package>
-              <name>hdp_mon_nagios_addons</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse</osFamily>
-          <package>
-            <name>php5-json</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>centos5</osFamily>
-          <package>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5</osFamily>
-          <package>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>oraclelinux5</osFamily>
-          <package>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/1.3.4/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/1.3.4/services/NAGIOS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/1.3.4/services/NAGIOS/metainfo.xml
deleted file mode 100644
index 4b0e6b1..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/1.3.4/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,94 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-            <cardinality>1</cardinality>
-            <commandScript>
-              <script>scripts/nagios_server.py</script>
-              <scriptType>PYTHON</scriptType>
-              <timeout>600</timeout>
-            </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>perl</name>
-            </package>
-            <package>
-              <name>perl-Net-SNMP</name>
-            </package>
-            <package>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <name>fping</name>
-            </package>
-            <package>
-              <name>hdp_mon_nagios_addons</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse</osFamily>
-          <package>
-            <name>php5-json</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>centos5</osFamily>
-          <package>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5</osFamily>
-          <package>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>oraclelinux5</osFamily>
-          <package>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/NAGIOS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/NAGIOS/metainfo.xml
deleted file mode 100644
index dafd3c3..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,90 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.2.3</version>
-
-      <components>
-        <component>
-          <name>NAGIOS_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/nagios_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>perl</name>
-            </package>
-            <package>
-              <name>perl-Net-SNMP</name>
-            </package>
-            <package>
-              <name>nagios-plugins</name>
-            </package>
-            <package>
-              <name>nagios</name>
-            </package>
-            <package>
-              <name>nagios-www</name>
-            </package>
-            <package>
-              <name>nagios-devel</name>
-            </package>
-            <package>
-              <name>fping</name>
-            </package>
-            <package>
-              <name>hdp_mon_nagios_addons</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11</osFamily>
-          <packages>
-            <package>
-              <name>php5-json</name>
-            </package>
-            <package>
-              <name>apache2-mod_php5</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5</osFamily>
-          <packages>
-            <package>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <monitoringService>true</monitoringService>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/NAGIOS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/NAGIOS/metainfo.xml
deleted file mode 100644
index 28c73b5..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,89 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-          <name>NAGIOS_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/nagios_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>perl</name>
-            </package>
-            <package>
-              <name>perl-Net-SNMP</name>
-            </package>
-            <package>
-              <name>nagios-plugins</name>
-            </package>
-            <package>
-              <name>nagios</name>
-            </package>
-            <package>
-              <name>nagios-www</name>
-            </package>
-            <package>
-              <name>nagios-devel</name>
-            </package>
-            <package>
-              <name>fping</name>
-            </package>
-            <package>
-              <name>hdp_mon_nagios_addons</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11</osFamily>
-          <packages>
-            <package>
-              <name>php5-json</name>
-            </package>
-            <package>
-              <name>apache2-mod_php5</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5</osFamily>
-          <packages>
-            <package>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <monitoringService>true</monitoringService>
-    </service>
-  </services>
-</metainfo>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/2.0.6/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.6/role_command_order.json b/ambari-server/src/test/resources/stacks/HDP/2.0.6/role_command_order.json
index 1c2181c..6154004 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.6/role_command_order.json
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.6/role_command_order.json
@@ -3,8 +3,6 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
@@ -13,12 +11,6 @@
     "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
     "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
     "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
@@ -48,8 +40,7 @@
     "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
     "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
     "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
-    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
-    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
     "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
   },
   "_comment" : "GLUSTERFS-specific dependencies",
@@ -71,8 +62,6 @@
     "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
@@ -93,7 +82,6 @@
   "namenode_optional_ha": {
     "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
     "ZKFC-START": ["NAMENODE-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",

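As the file's own _comment states, each key in role_command_order.json is a blocked ROLE-COMMAND mapped to the list of ROLE-COMMANDs that must finish first; this hunk removes every NAGIOS_SERVER entry and rewires GANGLIA_SERVER-UPGRADE to depend directly on SQOOP-UPGRADE. A minimal sketch, assuming the post-patch file is on disk, that resolves transitive blockers from the general_deps section:

  import json

  with open("role_command_order.json") as handle:
      deps = {key: value
              for key, value in json.load(handle)["general_deps"].items()
              if key != "_comment"}

  def blockers(role_command, seen=None):
      """Transitively collect everything that must run before role_command."""
      seen = set() if seen is None else seen
      for blocker in deps.get(role_command, []):
          if blocker not in seen:
              seen.add(blocker)
              blockers(blocker, seen)
      return seen

  print(sorted(blockers("HBASE_REGIONSERVER-START")))
  # -> ['HBASE_MASTER-START', 'ZOOKEEPER_SERVER-START'] per the entries above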
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
deleted file mode 100644
index 958eb14..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,139 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <displayName>Nagios</displayName>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-           <name>NAGIOS_SERVER</name>
-           <displayName>Nagios Server</displayName>
-           <category>MASTER</category>
-           <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>OOZIE/OOZIE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>TEZ/TEZ_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>HCATALOG/HCAT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-           <commandScript>
-             <script>scripts/nagios_server.py</script>
-             <scriptType>PYTHON</scriptType>
-             <timeout>600</timeout>
-           </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>perl</name>
-            </package>
-            <package>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <name>fping</name>
-            </package>
-            <package>
-              <name>hdp_mon_nagios_addons</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11</osFamily>
-          <packages>
-            <package>
-              <name>php5*-json</name>
-            </package>
-            <package>
-              <name>apache2?mod_php*</name>
-            </package>
-            <package>
-              <name>php-curl</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5</osFamily>
-          <packages>
-            <package>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <configuration-dependencies>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-      <monitoringService>true</monitoringService>
-    </service>
-  </services>
-</metainfo>
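
The deleted metainfo.xml above is a standard Ambari service descriptor: one
MASTER component with cardinality 1, host-scoped client dependencies that
auto-deploy onto the Nagios host, per-osFamily package lists, and a PYTHON
command script with a 600-second timeout. A short sketch of reading the per-OS
package lists out of such a file with the standard library (the 'metainfo.xml'
path is an assumption; this is illustrative, not Ambari's own stack parser):

# Sketch: pull the per-OS package list out of a service metainfo.xml
# like the one deleted above.
import xml.etree.ElementTree as ET

tree = ET.parse('metainfo.xml')          # file path is an assumption
for spec in tree.getroot().iter('osSpecific'):
    family = spec.findtext('osFamily')
    names = [p.text for p in spec.findall('packages/package/name')]
    print(family, names)
# e.g. any    ['perl', 'nagios-plugins-1.4.9', ...]
#      suse11 ['php5*-json', 'apache2?mod_php*', 'php-curl']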


[04/17] ambari git commit: AMBARI-8276 - Alerts: Remove Nagios Service From The Stack (jonathanhurley)


http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/NAGIOS/test_mm_wrapper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/NAGIOS/test_mm_wrapper.py b/ambari-server/src/test/python/stacks/2.0.6/NAGIOS/test_mm_wrapper.py
deleted file mode 100644
index ff556ce..0000000
--- a/ambari-server/src/test/python/stacks/2.0.6/NAGIOS/test_mm_wrapper.py
+++ /dev/null
@@ -1,549 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import StringIO
-
-import os, sys
-import pprint
-import subprocess
-from unittest import TestCase
-from mock.mock import Mock, MagicMock, patch
-import mm_wrapper
-
-class TestOrWrapper(TestCase):
-
-  dummy_ignore_file = """
-vm-4.vm HIVE HIVE_METASTORE
-vm-5.vm GANGLIA GANGLIA_MONITOR
-vm-4.vm YARN NODEMANAGER
-vm-3.vm YARN NODEMANAGER
-vm-3.vm HBASE HBASE_REGIONSERVER
-vm-4.vm HBASE HBASE_REGIONSERVER
-vm-4.vm STORM STORM_REST_API
-vm-4.vm HDFS DATANODE
-vm-4.vm STORM SUPERVISOR
-vm-4.vm STORM NIMBUS
-vm-4.vm STORM STORM_UI_SERVER
-vm-3.vm STORM SUPERVISOR
-vm-4.vm HDFS SECONDARY_NAMENODE
-vm-3.vm FLUME FLUME_HANDLER
-vm-4.vm GANGLIA GANGLIA_SERVER
-vm-4.vm HIVE HIVE_SERVER
-vm-4.vm ZOOKEEPER ZOOKEEPER_SERVER
-vm-4.vm WEBHCAT WEBHCAT_SERVER
-vm-3.vm HBASE HBASE_MASTER
-vm-4.vm GANGLIA GANGLIA_MONITOR
-vm-3.vm GANGLIA GANGLIA_MONITOR
-vm-3.vm HDFS NAMENODE
-vm-4.vm HIVE MYSQL_SERVER
-vm-4.vm YARN APP_TIMELINE_SERVER
-vm-4.vm FALCON FALCON_SERVER
-vm-3.vm HDFS DATANODE
-vm-4.vm YARN RESOURCEMANAGER
-vm-4.vm OOZIE OOZIE_SERVER
-vm-4.vm MAPREDUCE2 HISTORYSERVER
-vm-4.vm STORM DRPC_SERVER
-vm-4.vm FLUME FLUME_HANDLER
-vm-3.vm ZOOKEEPER ZOOKEEPER_SERVER
-"""
-
-  default_empty_check_result = {
-    'message': 'No checks have been run (no hostnames provided)',
-    'retcode': -1,
-    'real_retcode': None
-  }
-
-
-  @patch("__builtin__.open")
-  def test_ignored_host_list(self, open_mock):
-    # Check with empty file content
-    open_mock.return_value.__enter__.return_value.read.return_value = ""
-    lst = mm_wrapper.ignored_host_list('STORM', 'SUPERVISOR')
-    self.assertEqual(pprint.pformat(lst), '[]')
-    # Check with dummy content
-    open_mock.return_value.__enter__.return_value.read.return_value = self.dummy_ignore_file
-    lst = mm_wrapper.ignored_host_list('STORM', 'SUPERVISOR')
-    self.assertEqual(pprint.pformat(lst), "['vm-4.vm', 'vm-3.vm']")
-    # Check if service name/comp name are not defined
-    open_mock.return_value.__enter__.return_value.read.return_value = self.dummy_ignore_file
-    lst = mm_wrapper.ignored_host_list('', '')
-    self.assertEqual(pprint.pformat(lst), "[]")
-
-
-  @patch("sys.exit")
-  def test_print_usage(self, exit_mock):
-    mm_wrapper.print_usage()
-    self.assertTrue(exit_mock.called)
-    self.assertEqual(exit_mock.call_args_list[0][0][0], 1)
-
-
-  def test_get_real_component(self):
-    with patch.dict(os.environ, {'NAGIOS__SERVICEHOST_COMPONENT': 'SUPERVISOR'}, clear=True):
-      component = mm_wrapper.get_real_component()
-      self.assertEqual(component, 'SUPERVISOR')
-    with patch.dict(os.environ, {'NAGIOS__SERVICEHOST_COMPONENT': 'MAPREDUCE2'}, clear=True):
-      component = mm_wrapper.get_real_component()
-      self.assertEqual(component, 'MAPREDUCE2')
-
-
-  @patch("mm_wrapper.print_usage")
-  def test_parse_args(self, print_usage_mock):
-    args = ['or', 'h1', 'h2', '--', 'prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    mode, hostnames, command_line = mm_wrapper.parse_args(args)
-    self.assertEquals(mode, mm_wrapper.OR)
-    self.assertEquals(hostnames, ['h1', 'h2'])
-    self.assertEquals(command_line, ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt'])
-
-    args = ['and', 'h1', 'h2', '--', 'prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    mode, hostnames, command_line = mm_wrapper.parse_args(args)
-    self.assertEquals(mode, mm_wrapper.AND)
-    self.assertEquals(hostnames, ['h1', 'h2'])
-    self.assertEquals(command_line, ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt'])
-
-    args = ['env_only', 'h1', 'h2', '--', 'prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    mode, hostnames, command_line = mm_wrapper.parse_args(args)
-    self.assertEquals(mode, mm_wrapper.ENV_ONLY)
-    self.assertEquals(hostnames, ['h1', 'h2'])
-    self.assertEquals(command_line, ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt'])
-
-    # Check wrong usage
-    args = []
-    mm_wrapper.parse_args(args)
-    self.assertTrue(print_usage_mock.called)
-
-
-  @patch("mm_wrapper.ignored_host_list")
-  @patch("mm_wrapper.work_in_or_mode")
-  @patch("mm_wrapper.work_in_and_mode")
-  @patch("mm_wrapper.work_in_env_only_mode")
-  @patch("mm_wrapper.work_in_filter_mm_mode")
-  @patch("mm_wrapper.work_in_legacy_check_wrapper_mode")
-  def test_do_work(self, work_in_legacy_check_wrapper_mode, work_in_filter_mm_mode_mock,
-                   work_in_env_only_mode_mock, work_in_and_mode_mock,
-                   work_in_or_mode_mock,
-                   ignored_host_list_mock):
-    hostnames = ['h1', 'h2', 'h3', 'h4']
-    ignored_host_list_mock.return_value = ['h2', 'h3']
-    command_line = ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    work_in_or_mode_mock.return_value = {
-      'message': "or_mode mode result",
-      'retcode': 0,
-      'real_retcode': None
-    }
-    work_in_and_mode_mock.return_value = {
-      'message': "and_mode mode result",
-      'retcode': 0,
-      'real_retcode': None
-    }
-    work_in_env_only_mode_mock.return_value = {
-      'message': "env_only mode result",
-      'retcode': 0,
-      'real_retcode': None
-    }
-    work_in_filter_mm_mode_mock.return_value = {
-      'message': "filter_mm mode result",
-      'retcode': 0,
-      'real_retcode': None
-    }
-    work_in_legacy_check_wrapper_mode.return_value = {
-      'message': "legacy_check_wrapper mode result",
-      'retcode': 0,
-      'real_retcode': None
-    }
-    result = mm_wrapper.do_work(mm_wrapper.OR, hostnames, command_line)
-    self.assertEquals(str(result), "(['or_mode mode result'], 0)")
-
-    result = mm_wrapper.do_work(mm_wrapper.AND, hostnames, command_line)
-    self.assertEquals(str(result), "(['and_mode mode result'], 0)")
-
-    result = mm_wrapper.do_work(mm_wrapper.ENV_ONLY, hostnames, command_line)
-    self.assertEquals(str(result), "(['env_only mode result'], 0)")
-
-    result = mm_wrapper.do_work(mm_wrapper.FILTER_MM, hostnames, command_line)
-    self.assertEquals(str(result), "(['filter_mm mode result'], 0)")
-
-    result = mm_wrapper.do_work(mm_wrapper.LEGACY_CHECK_WRAPPER, hostnames, command_line)
-    self.assertEquals(str(result), "(['legacy_check_wrapper mode result'], 0)")
-
-    # Check behaviour when real_retcode is defined
-    work_in_or_mode_mock.return_value = {
-      'message': "or_mode mode result",
-      'retcode': 0,
-      'real_retcode': 1
-    }
-    result = mm_wrapper.do_work(mm_wrapper.OR, hostnames, command_line)
-    self.assertEquals(str(result), "(['or_mode mode result', 'AMBARIPASSIVE=1'], 0)")
-
-
-  @patch("mm_wrapper.check_output")
-  def test_work_in_or_mode(self, check_output_mock):
-    hostnames = ['h1', 'h2', 'h3', 'h4']
-    ignored_hosts = ['h2', 'h3']
-    command_line = ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    custom_env = {'MM_HOSTS': ignored_hosts}
-
-    # Normal usage
-    check_output_mock.return_value = 'Dummy message'
-    result = mm_wrapper.work_in_or_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(str(result),
-                      "{'message': 'Dummy message', 'real_retcode': None, 'retcode': 0}")
-    self.assertEquals(check_output_mock.call_count, 1)  # Exited on first success
-    self.assertEquals(check_output_mock.call_args[1]['env']['MM_HOSTS'], ignored_hosts)
-    for check_tupple in zip(check_output_mock.call_args_list, hostnames):
-      self.assertEquals(check_tupple[0][0][0], ['prog', '-h', check_tupple[1], '-opt', 'yet', 'another', 'opt'])
-
-    check_output_mock.reset_mock()
-
-    # Failed all checks
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(3, 'dummy cmd')
-    error.output = 'dummy output2'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output3'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(2, 'dummy cmd')
-    error.output = 'dummy output4'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-
-    result = mm_wrapper.work_in_or_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 4)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output4', 'real_retcode': None, 'retcode': 2}")
-
-    check_output_mock.reset_mock()
-
-    # Failed all but MM host component checks
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(0, 'dummy cmd')
-    error.output = 'dummy output2'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(2, 'dummy cmd')
-    error.output = 'dummy output3'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(3, 'dummy cmd')
-    error.output = 'dummy output4'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-
-    result = mm_wrapper.work_in_or_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 4)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output4', 'real_retcode': None, 'retcode': 3}")
-
-    check_output_mock.reset_mock()
-
-    # Components check only for one check is successful
-    ignored_hosts = []
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(0, 'dummy cmd')
-    error.output = 'dummy output2'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(2, 'dummy cmd')
-    error.output = 'dummy output3'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(3, 'dummy cmd')
-    error.output = 'dummy output4'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-
-    result = mm_wrapper.work_in_or_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 2)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output2', 'real_retcode': None, 'retcode': 0}")
-
-
-  @patch("mm_wrapper.check_output")
-  def test_work_in_and_mode(self, check_output_mock):
-    hostnames = ['h1', 'h2', 'h3', 'h4']
-    ignored_hosts = ['h2', 'h3']
-    command_line = ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    custom_env = {'MM_HOSTS': ignored_hosts}
-
-    # Normal usage
-    check_output_mock.return_value = 'Dummy message'
-    result = mm_wrapper.work_in_and_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(str(result),
-                      "{'message': 'Dummy message', 'real_retcode': None, 'retcode': 0}")
-    self.assertEquals(check_output_mock.call_count, 4)
-    self.assertEquals(check_output_mock.call_args[1]['env']['MM_HOSTS'], ignored_hosts)
-    for check_tupple in zip(check_output_mock.call_args_list, hostnames):
-      self.assertEquals(check_tupple[0][0][0], ['prog', '-h', check_tupple[1], '-opt', 'yet', 'another', 'opt'])
-
-    check_output_mock.reset_mock()
-
-    # Failed all checks
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(3, 'dummy cmd')
-    error.output = 'dummy output2'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output3'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(2, 'dummy cmd')
-    error.output = 'dummy output4'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_and_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 4)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output4', 'real_retcode': None, 'retcode': 2}")
-
-    check_output_mock.reset_mock()
-
-    # Failed all but MM host component checks
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(0, 'dummy cmd')
-    error.output = 'dummy output2'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(2, 'dummy cmd')
-    error.output = 'dummy output3'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(3, 'dummy cmd')
-    error.output = 'dummy output4'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_and_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 4)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output4', 'real_retcode': None, 'retcode': 3}")
-
-    check_output_mock.reset_mock()
-
-    # Components check only for one check is successful
-    ignored_hosts = []
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(0, 'dummy cmd')
-    error.output = 'dummy output2'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(2, 'dummy cmd')
-    error.output = 'dummy output3'
-    check_output_side_effects.append(error)
-
-    error = subprocess.CalledProcessError(3, 'dummy cmd')
-    error.output = 'dummy output4'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_and_mode(hostnames, ignored_hosts, command_line,
-                                        custom_env,
-                                        self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 4)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output4', 'real_retcode': None, 'retcode': 3}")
-
-
-  @patch("mm_wrapper.check_output")
-  def test_work_in_env_only_mode(self, check_output_mock):
-    hostnames = ['h1', 'h2', 'h3', 'h4']
-    ignored_hosts = ['h2', 'h3']
-    command_line = ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    custom_env = {'MM_HOSTS' : ignored_hosts}
-
-    # Normal usage
-    check_output_mock.return_value = 'Dummy message'
-    result = mm_wrapper.work_in_env_only_mode(hostnames, command_line, custom_env)
-    self.assertEquals(str(result),
-                      "{'message': 'Dummy message', 'real_retcode': None, 'retcode': 0}")
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(check_output_mock.call_args[1]['env']['MM_HOSTS'], ignored_hosts)
-    self.assertEquals(check_output_mock.call_args[0][0],
-                      ['prog', '-h', 'h1', 'h2', 'h3', 'h4', '-opt', 'yet', 'another', 'opt'])
-
-    check_output_mock.reset_mock()
-
-    # Failed all checks
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_env_only_mode(hostnames, command_line, custom_env)
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output1', 'real_retcode': None, 'retcode': 1}")
-
-    check_output_mock.reset_mock()
-
-
-  @patch("mm_wrapper.check_output")
-  def test_work_in_filter_mm_mode(self, check_output_mock):
-    hostnames = ['h1', 'h2', 'h3', 'h4']
-    ignored_hosts = ['h2', 'h3']
-    command_line = ['prog', '-h', '^^', '-opt', 'yet', 'another', 'opt']
-    custom_env = {'MM_HOSTS' : ignored_hosts}
-
-    # Normal usage
-    check_output_mock.return_value = 'Dummy message'
-    result = mm_wrapper.work_in_filter_mm_mode(hostnames, ignored_hosts, command_line,
-                                               custom_env,
-                                               self.default_empty_check_result)
-    self.assertEquals(str(result),
-                      "{'message': 'Dummy message', 'real_retcode': None, 'retcode': 0}")
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(check_output_mock.call_args[1]['env']['MM_HOSTS'], ignored_hosts)
-    self.assertEquals(check_output_mock.call_args[0][0],
-                      ['prog', '-h', 'h1', 'h4', '-opt', 'yet', 'another', 'opt'])
-
-    check_output_mock.reset_mock()
-
-    # Failed all checks
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_filter_mm_mode(hostnames, ignored_hosts, command_line,
-                                               custom_env,
-                                               self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output1', 'real_retcode': None, 'retcode': 1}")
-
-    check_output_mock.reset_mock()
-
-    # All host components are in MM
-    ignored_hosts = hostnames
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_filter_mm_mode(hostnames, ignored_hosts, command_line,
-                                               custom_env,
-                                               self.default_empty_check_result)
-    self.assertEquals(check_output_mock.call_count, 0)
-    self.assertEquals(str(result),
-                      "{'message': 'No checks have been run (no hostnames provided)', "
-                      "'real_retcode': None, 'retcode': -1}")
-
-    check_output_mock.reset_mock()
-
-
-  @patch("mm_wrapper.check_output")
-  @patch.dict(os.environ, {'NAGIOS_HOSTNAME': 'h2'}, clear=True)
-  def test_work_in_legacy_check_wrapper_mode(self, check_output_mock):
-    command_line = ['prog', '-opt', 'yet', 'another', 'opt']
-    ignored_hosts = []
-    custom_env = {'MM_HOSTS': ignored_hosts}
-
-    # Normal usage
-    ignored_hosts = []
-    check_output_mock.return_value = 'Dummy message'
-    result = mm_wrapper.work_in_legacy_check_wrapper_mode(ignored_hosts, command_line,
-                                               custom_env)
-    self.assertEquals(str(result),
-                      "{'message': 'Dummy message', 'real_retcode': None, 'retcode': 0}")
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(check_output_mock.call_args[1]['env']['MM_HOSTS'], ignored_hosts)
-    self.assertEquals(check_output_mock.call_args[0][0],
-                      ['prog', '-opt', 'yet', 'another', 'opt'])
-
-    check_output_mock.reset_mock()
-
-    # Failed check on host that is not in MM state
-    ignored_hosts = ['h3']
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_legacy_check_wrapper_mode(ignored_hosts, command_line,
-                                               custom_env)
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output1', 'real_retcode': None, 'retcode': 1}")
-
-    check_output_mock.reset_mock()
-
-    # Failed check on host that is in MM state
-    ignored_hosts = ['h2']
-    check_output_side_effects = []
-    error = subprocess.CalledProcessError(1, 'dummy cmd')
-    error.output = 'dummy output1'
-    check_output_side_effects.append(error)
-
-    check_output_mock.side_effect = check_output_side_effects
-    result = mm_wrapper.work_in_legacy_check_wrapper_mode(ignored_hosts, command_line,
-                                               custom_env)
-    self.assertEquals(check_output_mock.call_count, 1)
-    self.assertEquals(str(result),
-                      "{'message': 'dummy output1', 'real_retcode': 1, 'retcode': 0}")
-
-    check_output_mock.reset_mock()
-
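
Taken together, the assertions above pin down the mm_wrapper contract:
hostnames come before '--' and the check command after it, '^^' is the
per-host placeholder, MM_HOSTS rides in the environment, and OR mode stops at
the first success on a host that is not in maintenance mode, with do_work
appending AMBARIPASSIVE=<code> when a real_retcode is set. A sketch of the
OR-mode loop consistent with those assertions follows -- a reconstruction, not
the deleted module; check_output here is subprocess's, whereas mm_wrapper
patched its own local helper:

# Sketch of the OR-mode loop the tests above exercise: substitute each
# hostname for the '^^' placeholder, stop at the first passing check on
# a non-MM host, otherwise keep the last result seen.
import subprocess

def work_in_or_mode(hostnames, ignored_hosts, command_line, custom_env,
                    empty_check_result):
    result = empty_check_result
    for hostname in hostnames:
        # substitute the current host for the '^^' placeholder
        cmd = [hostname if token == '^^' else token for token in command_line]
        try:
            message, retcode = subprocess.check_output(cmd, env=custom_env), 0
        except subprocess.CalledProcessError as err:
            message, retcode = err.output, err.returncode
        result = {'message': message, 'retcode': retcode, 'real_retcode': None}
        if retcode == 0 and hostname not in ignored_hosts:
            break  # first success on a non-MM host wins
    return result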

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/NAGIOS/test_nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/NAGIOS/test_nagios_server.py b/ambari-server/src/test/python/stacks/2.0.6/NAGIOS/test_nagios_server.py
deleted file mode 100644
index a35f537..0000000
--- a/ambari-server/src/test/python/stacks/2.0.6/NAGIOS/test_nagios_server.py
+++ /dev/null
@@ -1,315 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-
-from mock.mock import Mock, MagicMock, patch
-from stacks.utils.RMFTestCase import *
-
-
-class TestNagiosServer(RMFTestCase):
-  def test_configure_default(self):
-    self.executeScript("2.0.6/services/NAGIOS/package/scripts/nagios_server.py",
-                       classname="NagiosServer",
-                       command="configure",
-                       config_file="default.json"
-    )
-    self.assert_configure_default()
-    self.assertNoMoreResources()
-
-  def test_start_default(self):
-    self.executeScript(
-      "2.0.6/services/NAGIOS/package/scripts/nagios_service.py",
-      classname="NagiosServer",
-      command="start",
-      config_file="default.json"
-    )
-    self.assert_configure_default()
-    self.assertResourceCalled('Execute', 'service nagios start',
-    )
-    self.assertResourceCalled('MonitorWebserver', 'restart',
-    )
-    self.assertNoMoreResources()
-
-
-  @patch('os.path.isfile')
-  def test_stop_default(self, os_path_isfile_mock):
-    src_dir = RMFTestCase._getSrcFolder()    
-    os_path_isfile_mock.side_effect = [False, True]
-       
-    self.executeScript(
-      "2.0.6/services/NAGIOS/package/scripts/nagios_service.py",
-      classname="NagiosServer",
-      command="stop",
-      config_file="default.json"
-    )
-    
-    self.assertResourceCalled('Execute','service nagios stop')
-    self.assertResourceCalled('Execute','rm -f /var/run/nagios/nagios.pid')
-    self.assertResourceCalled('MonitorWebserver', 'restart')
-    
-    self.assertNoMoreResources()
-
-
-  def assert_configure_default(self):
-    self.assertResourceCalled('File', '/etc/apache2/conf.d/nagios.conf',
-                              owner='nagios',
-                              group='nagios',
-                              content=Template("nagios.conf.j2"),
-                              mode=0644
-    )
-    self.assertResourceCalled('Directory', '/etc/nagios',
-                              owner='nagios',
-                              group='nagios',
-    )
-    self.assertResourceCalled('Directory', '/usr/lib64/nagios/plugins'
-    )
-    self.assertResourceCalled('Directory', '/etc/nagios/objects'
-    )
-    self.assertResourceCalled('Directory', '/var/run/nagios',
-                              owner='nagios',
-                              group='nagios',
-                              mode=0755,
-                              recursive=True
-    )
-    self.assertResourceCalled('Directory', '/var/nagios',
-                              owner='nagios',
-                              group='nagios',
-                              recursive=True
-    )
-    self.assertResourceCalled('Directory', '/var/nagios/spool/checkresults',
-                              owner='nagios',
-                              group='nagios',
-                              recursive=True
-    )
-    self.assertResourceCalled('Directory', '/var/nagios/rw',
-                              owner='nagios',
-                              group='nagios',
-                              recursive=True
-    )
-    self.assertResourceCalled('Directory', '/usr/share/hdp/nagios/',
-                              owner='nagios',
-                              group='nagios',
-                              recursive=True
-    )
-    self.assertResourceCalled('Directory', '/var/log/nagios',
-                              owner='nagios',
-                              group='nagios',
-                              mode=0755
-    )
-    self.assertResourceCalled('Directory', '/var/log/nagios/archives',
-                              owner='nagios',
-                              group='nagios',
-                              mode=0755
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/nagios/nagios.cfg',
-                              owner='nagios',
-                              group='nagios',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/nagios/resource.cfg',
-                              owner='nagios',
-                              group='nagios',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig',
-                              '/etc/nagios/objects/hadoop-hosts.cfg',
-                              owner='nagios',
-                              group='hadoop',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig',
-                              '/etc/nagios/objects/hadoop-hostgroups.cfg',
-                              owner='nagios',
-                              group='hadoop',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig',
-                              '/etc/nagios/objects/hadoop-servicegroups.cfg',
-                              owner='nagios',
-                              group='hadoop',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig',
-                              '/etc/nagios/objects/hadoop-services.cfg',
-                              owner='nagios',
-                              group='hadoop',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig',
-                              '/etc/nagios/objects/hadoop-commands.cfg',
-                              owner='nagios',
-                              group='hadoop',
-                              mode=None
-    )
-    self.assertResourceCalled('TemplateConfig',
-                              '/etc/nagios/objects/contacts.cfg',
-                              owner='nagios',
-                              group='hadoop',
-                              mode=None
-    )
-    self.assertResourceCalled('File', '/usr/lib64/nagios/plugins/check_cpu.pl',
-                              content=StaticFile('check_cpu.pl'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File', '/usr/lib64/nagios/plugins/check_cpu.php',
-                              content=StaticFile('check_cpu.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File', '/usr/lib64/nagios/plugins/check_cpu_ha.php',
-                              content=StaticFile('check_cpu_ha.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_datanode_storage.php',
-                              content=StaticFile('check_datanode_storage.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_aggregate.php',
-                              content=StaticFile('check_aggregate.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_hdfs_blocks.php',
-                              content=StaticFile('check_hdfs_blocks.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_hdfs_capacity.php',
-                              content=StaticFile('check_hdfs_capacity.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_rpcq_latency.php',
-                              content=StaticFile('check_rpcq_latency.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_rpcq_latency_ha.php',
-                              content=StaticFile('check_rpcq_latency_ha.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_webui.sh',
-                              content=StaticFile('check_webui.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_webui_ha.sh',
-                              content=StaticFile('check_webui_ha.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_name_dir_status.php',
-                              content=StaticFile('check_name_dir_status.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_oozie_status.sh',
-                              content=StaticFile('check_oozie_status.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_templeton_status.sh',
-                              content=StaticFile('check_templeton_status.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_hive_metastore_status.sh',
-                              content=StaticFile(
-                                'check_hive_metastore_status.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_hue_status.sh',
-                              content=StaticFile('check_hue_status.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_mapred_local_dir_used.sh',
-                              content=StaticFile(
-                                'check_mapred_local_dir_used.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_nodemanager_health.sh',
-                              content=StaticFile('check_nodemanager_health.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_namenodes_ha.sh',
-                              content=StaticFile('check_namenodes_ha.sh'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/hdp_nagios_init.php',
-                              content=StaticFile('hdp_nagios_init.php'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_checkpoint_time.py',
-                              content=StaticFile('check_checkpoint_time.py'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File', '/usr/lib64/nagios/plugins/sys_logger.py',
-        content = StaticFile('sys_logger.py'),
-        mode = 0755,
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_ambari_alerts.py',
-                              content=StaticFile('check_ambari_alerts.py'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/mm_wrapper.py',
-                              content=StaticFile('mm_wrapper.py'),
-                              mode=0755
-    )
-    self.assertResourceCalled('File',
-                              '/usr/lib64/nagios/plugins/check_hive_thrift_port.py',
-                              content=StaticFile('check_hive_thrift_port.py'),
-                              mode=0755
-    )
-    self.assertResourceCalled('Execute',
-                              'htpasswd2 -c -b  /etc/nagios/htpasswd.users nagiosadmin \'!`"\'"\'"\' 1\''
-    )
-    self.assertResourceCalled('File', '/etc/nagios/htpasswd.users',
-                              owner='nagios',
-                              group='nagios',
-                              mode=0640
-    )
-    self.assertResourceCalled('Execute', 'usermod -G nagios wwwrun'
-    )
-    self.assertResourceCalled('File', '/etc/nagios/command.cfg',
-                              owner='nagios',
-                              group='nagios'
-    )
-    self.assertResourceCalled('File', '/usr/share/hdp/nagios//nagios_alerts.php',
-        content = StaticFile('nagios_alerts.php'),
-    )
-    self.assertResourceCalled('File', '/etc/apache2/conf.d/hdp_mon_nagios_addons.conf',
-        content = StaticFile('hdp_mon_nagios_addons.conf'),
-    )
-    self.assertResourceCalled('File', '/var/nagios/ignore.dat',
-                              owner='nagios',
-                              group='nagios',
-                              mode=0664
-    )
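
The resource assertions above imply a command script of the usual Ambari
Script shape; a skeleton consistent with them is sketched below. The nagios()
stand-in and treating MonitorWebserver as an importable resource are
assumptions made for illustration -- the deleted scripts/nagios_server.py is
the authority for the real bodies:

# Sketch of the command script shape these tests drove, reconstructed
# from the assertions above; not the deleted source.
from resource_management import *

def nagios():
  # stand-in for the configure body, which laid down the Directory, File
  # and TemplateConfig resources asserted in assert_configure_default
  pass

class NagiosServer(Script):
  def install(self, env):
    self.install_packages(env)

  def configure(self, env):
    nagios()

  def start(self, env):
    self.configure(env)
    Execute('service nagios start')
    MonitorWebserver('restart')   # custom resource assumed importable
                                  # from the stack libraries

  def stop(self, env):
    Execute('service nagios stop')
    Execute('rm -f /var/run/nagios/nagios.pid')
    MonitorWebserver('restart')

if __name__ == "__main__":
  NagiosServer().execute()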

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
index c7a04f1..f6e9570 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
@@ -673,9 +673,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "all_ping_ports": [
             "8670", 
             "8670"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 83e36ae..4ddc718 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -15,8 +15,8 @@
         "ambari_db_rca_username": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",
         "db_name": "ambari",
-        "group_list": "[\"hadoop\",\"nobody\",\"users\",\"nagios\"]",
-        "user_list": "[\"hive\",\"oozie\",\"nobody\",\"nagios\",\"ambari-qa\",\"flume\",\"hdfs\",\"storm\",\"mapred\",\"hbase\",\"tez\",\"zookeeper\",\"falcon\",\"sqoop\",\"yarn\",\"hcat\"]"
+        "group_list": "[\"hadoop\",\"nobody\",\"users\"]",
+        "user_list": "[\"hive\",\"oozie\",\"nobody\",\"ambari-qa\",\"flume\",\"hdfs\",\"storm\",\"mapred\",\"hbase\",\"tez\",\"zookeeper\",\"falcon\",\"sqoop\",\"yarn\",\"hcat\"]"
     }, 
     "commandType": "EXECUTION_COMMAND", 
     "roleParams": {}, 
@@ -509,14 +509,6 @@
             "jobhistory_heapsize": "900", 
             "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
         },
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "!`\"' 1",
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua"
-        }, 
         "tez-env": {
             "content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}", 
             "tez_user": "tez"
@@ -776,9 +768,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "all_ping_ports": [
             "8670", 
             "8670"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
index e4f0970..b31d4b9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
@@ -720,9 +720,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "all_ping_ports": [
             "8670", 
             "8670"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
index 18d9c46..c8333ac 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
@@ -499,14 +499,6 @@
             "jobhistory_heapsize": "900", 
             "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
         },
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "!`\"' 1",
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua"
-        }, 
         "tez-env": {
             "content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}", 
             "tez_user": "tez"
@@ -759,9 +751,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "all_ping_ports": [
             "8670", 
             "8670"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
index 8ab2406..c4f44d8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
@@ -15,8 +15,8 @@
         "ambari_db_rca_username": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",
         "db_name": "ambari",
-        "group_list": "[\"hadoop\",\"nobody\",\"users\",\"nagios\"]",
-        "user_list": "[\"hive\",\"oozie\",\"nobody\",\"nagios\",\"ambari-qa\",\"flume\",\"hdfs\",\"storm\",\"mapred\",\"hbase\",\"tez\",\"zookeeper\",\"falcon\",\"sqoop\",\"yarn\",\"hcat\"]"
+        "group_list": "[\"hadoop\",\"nobody\",\"users\"]",
+        "user_list": "[\"hive\",\"oozie\",\"nobody\",\"ambari-qa\",\"flume\",\"hdfs\",\"storm\",\"mapred\",\"hbase\",\"tez\",\"zookeeper\",\"falcon\",\"sqoop\",\"yarn\",\"hcat\"]"
     }, 
     "commandType": "EXECUTION_COMMAND", 
     "roleParams": {}, 
@@ -504,14 +504,6 @@
             "jobhistory_heapsize": "900", 
             "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
         },
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "!`\"' 1",
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua"
-        }, 
         "tez-env": {
             "content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}", 
             "tez_user": "tez"
@@ -771,9 +763,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "all_ping_ports": [
             "8670", 
             "8670"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json
index 1d12254..0b7a3bb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json
@@ -233,7 +233,6 @@
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn", 
             "zk_log_dir": "/var/log/zookeeper", 
             "hive_aux_jars_path": "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar", 
-            "nagios_web_password": "!`\"' 1", 
             "tickTime": "2000", 
             "hive_ambari_database": "MySQL", 
             "falcon_user": "falcon", 
@@ -265,15 +264,12 @@
             "user_group": "hadoop", 
             "yarn_user": "yarn", 
             "gmond_user": "nobody", 
-            "nagios_web_login": "nagiosadmin", 
             "storm_user": "storm", 
-            "nagios_contact": "asd@asd.asd", 
             "hive_database": "New MySQL Database", 
             "storm_log_dir": "/var/log/storm", 
             "clientPort": "2181", 
             "oozie_derby_database": "Derby", 
             "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce", 
-            "nagios_group": "nagios", 
             "hdfs_user": "hdfs", 
             "hbase_user": "hbase", 
             "oozie_database_type": "derby", 
@@ -290,7 +286,6 @@
             "hcat_user": "hcat", 
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "nagios_user": "nagios", 
             "hbase_log_dir": "/var/log/hbase",
             "falcon_user": "falcon",
             "falcon_port": "15000",
@@ -683,9 +678,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "all_ping_ports": [
             "8670", 
             "8670"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
index fa3b505..253747a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
@@ -104,8 +104,7 @@
             "storm_user": "storm", 
             "clientPort": "2181", 
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn", 
-            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce", 
-            "nagios_group": "nagios", 
+            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce",  
             "hdfs_user": "hdfs", 
             "hbase_user": "hbase", 
             "webhcat_user": "hcat", 
@@ -118,8 +117,7 @@
             "resourcemanager_heapsize": "1024", 
             "hcat_user": "hcat", 
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "nagios_user": "nagios"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         }, 
         "capacity-scheduler": {
             "yarn.scheduler.capacity.node-locality-delay": "40", 
@@ -443,14 +441,6 @@
             "jobhistory_heapsize": "900", 
             "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
         },
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "password", 
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua"
-        }, 
         "tez-env": {
             "content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}", 
             "tez_user": "tez"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
index 15d6273..6c06f35 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
@@ -369,11 +369,9 @@
             "hadoop_pid_dir_prefix": "/var/run/hadoop",
 			      "jobhistory_http_keytab": "/etc/security/keytabs/spnego.service.keytab",
             "resourcemanager_principal_name": "rm/_HOST", 
-            "hadoop_http_principal_name": "HTTP/_HOST",
-            "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab", 
+            "hadoop_http_principal_name": "HTTP/_HOST", 
             "namenode_principal_name": "nn/_HOST", 
-            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
-            "nagios_principal_name": "nagios/c6402.ambari.apache.org@EXAMPLE.COM",  
+            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab",   
             "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab", 
             "journalnode_keytab": "/etc/security/keytabs/jn.service.keytab", 
             "hbase_master_keytab": "/etc/security/keytabs/hbase.service.keytab", 
@@ -465,14 +463,6 @@
             "jobhistory_heapsize": "900", 
             "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
         }, 
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "password", 
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua"
-        }, 
         "tez-env": {
             "content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}", 
             "tez_user": "tez"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index dba36e5..c60227b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -561,16 +561,6 @@
             "jobhistory_heapsize": "900", 
             "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
         }, 
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "password", 
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua",
-            "nagios_principal_name": "nagios/c6402.ambari.apache.org@EXAMPLE.COM",
-            "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab"
-        },
         "oozie-env": {
             "oozie_derby_database": "Derby", 
             "oozie_admin_port": "11001", 
@@ -796,9 +786,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "all_ping_ports": [
             "8670", 
             "8670"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
index 0f49ddf..a2c4fef 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
@@ -549,16 +549,6 @@
             "jobhistory_heapsize": "900", 
             "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
         }, 
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "password", 
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua",
-            "nagios_principal_name": "nagios/c6402.ambari.apache.org@EXAMPLE.COM",
-            "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab"
-        },
         "oozie-env": {
             "oozie_derby_database": "Derby", 
             "oozie_admin_port": "11001", 
@@ -781,9 +771,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "all_ping_ports": [
             "8670", 
             "8670"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json
index d779beb..f1eefdb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json
@@ -234,19 +234,16 @@
             "syncLimit": "5", 
             "resourcemanager_principal_name": "rm/_HOST", 
             "hadoop_http_principal_name": "HTTP/_HOST", 
-            "kinit_path_local": "/usr/bin", 
-            "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab", 
+            "kinit_path_local": "/usr/bin",  
             "hbase_regionserver_heapsize": "1024m",
             "hbase_regionserver_xmn_max": "512",
             "hbase_regionserver_xmn_ratio": "0.2",
             "resourcemanager_http_primary_name": "HTTP", 
             "datanode_primary_name": "dn", 
             "namenode_principal_name": "nn/_HOST", 
-            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
-            "nagios_principal_name": "nagios/c6402.ambari.apache.org@EXAMPLE.COM", 
+            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab",  
             "dfs_datanode_http_address": "1022", 
             "falcon_user": "falcon", 
-            "nagios_web_login": "nagiosadmin", 
             "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab", 
             "yarn_heapsize": "1024", 
             "hbase_pid_dir": "/var/run/hbase", 
@@ -256,7 +253,6 @@
             "oozie_jdbc_driver": "org.apache.derby.jdbc.EmbeddedDriver", 
             "hive_metastore_primary_name": "hive", 
             "hbase_master_keytab": "/etc/security/keytabs/hbase.service.keytab", 
-            "nagios_primary_name": "nagios", 
             "hive_database": "New MySQL Database", 
             "clientPort": "2181", 
             "oozie_derby_database": "Derby", 
@@ -264,7 +260,6 @@
             "oozie_pid_dir": "/var/run/oozie", 
             "datanode_principal_name": "dn/_HOST", 
             "hive_metastore_keytab": "/etc/security/keytabs/hive.service.keytab", 
-            "nagios_group": "nagios", 
             "hcat_user": "hcat", 
             "hadoop_heapsize": "1024", 
             "hbase_regionserver_primary_name": "hbase", 
@@ -283,7 +278,6 @@
             "yarn_nodemanager_container-executor_class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor", 
             "snamenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
             "dfs_datanode_address": "1019", 
-            "nagios_server": "c6402.ambari.apache.org", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
             "lzo_enabled": "true", 
             "oozie_principal_name": "oozie/c6402.ambari.apache.org", 
@@ -334,8 +328,7 @@
             "oozie_keytab": "/etc/security/keytabs/oozie.service.keytab", 
             "yarn_user": "yarn", 
             "gmond_user": "nobody", 
-            "keytab_path": "/etc/security/keytabs", 
-            "nagios_contact": "asd@asd.asd", 
+            "keytab_path": "/etc/security/keytabs",  
             "snamenode_primary_name": "nn", 
             "jobhistory_primary_name": "jhs", 
             "hdfs_user": "hdfs", 
@@ -375,13 +368,11 @@
             "nodemanager_http_principal_name": "HTTP/_HOST", 
             "hive_user": "hive", 
             "resourcemanager_http_principal_name": "HTTP/_HOST", 
-            "webHCat_http_primary_name": "HTTP", 
-            "nagios_web_password": "!`\"' 1", 
+            "webHCat_http_primary_name": "HTTP",  
             "smokeuser": "ambari-qa", 
             "hbase_master_heapsize": "1024m", 
             "kerberos_install_type": "MANUALLY_SET_KERBEROS", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "nagios_user": "nagios", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",  
             "resourcemanager_heapsize": "1024", 
             "hbase_regionserver_keytab": "/etc/security/keytabs/hbase.service.keytab", 
             "hbase_principal_name": "hbase", 
@@ -796,9 +787,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "all_ping_ports": [
             "8670", 
             "8670"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
index 6ca1377..8b5a698 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
@@ -45,9 +45,6 @@ class TestHookBeforeInstall(RMFTestCase):
     self.assertResourceCalled('Group', 'users',
         ignore_failures = False,
     )
-    self.assertResourceCalled('Group', 'nagios',
-        ignore_failures = False,
-    )
     self.assertResourceCalled('User', 'hive',
         gid = 'hadoop',
         ignore_failures = False,
@@ -63,11 +60,6 @@ class TestHookBeforeInstall(RMFTestCase):
         ignore_failures = False,
         groups = [u'nobody'],
     )
-    self.assertResourceCalled('User', 'nagios',
-        gid = 'nagios',
-        ignore_failures = False,
-        groups = [u'hadoop'],
-    )
     self.assertResourceCalled('User', 'ambari-qa',
         gid = 'hadoop',
         ignore_failures = False,
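
For context, these hook tests use Ambari's RMFTestCase harness: it replays a hook or service script against a canned JSON configuration, records every resource the script declares (without applying it to the system), and assertResourceCalled then verifies the recorded resources in declaration order. Below is a minimal sketch of the pattern; the script path, classname, and config file follow this test file's conventions but are illustrative, not taken from the patch:

    from stacks.utils.RMFTestCase import *

    class TestHookBeforeInstallSketch(RMFTestCase):
      def test_hook_default(self):
        # Replay the before-ANY hook; declared Group/User resources are
        # recorded rather than executed.
        self.executeScript("2.0.6/hooks/before-ANY/scripts/hook.py",
                           classname="BeforeAnyHook",
                           command="hook",
                           config_file="default.json")
        # Each assertion consumes the next recorded resource, so the
        # assertions must appear in declaration order.
        self.assertResourceCalled('User', 'ambari-qa',
            gid = 'hadoop',
            ignore_failures = False,
        )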

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.1/common/services.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/common/services.json b/ambari-server/src/test/python/stacks/2.1/common/services.json
index c4af73c..6559077 100644
--- a/ambari-server/src/test/python/stacks/2.1/common/services.json
+++ b/ambari-server/src/test/python/stacks/2.1/common/services.json
@@ -1,5 +1,5 @@
 {
-  "href" : "/api/v1/stacks/HDP/versions/2.1?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy&services/StackServices/service_name.in(HDFS,MAPREDUCE2,YARN,TEZ,NAGIOS,GANGLIA,HIVE,HBASE,PIG,SQOOP,OOZIE,ZOOKEEPER,FALCON,STORM,FLUME)",
+  "href" : "/api/v1/stacks/HDP/versions/2.1?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy&services/StackServices/service_name.in(HDFS,MAPREDUCE2,YARN,TEZ,GANGLIA,HIVE,HBASE,PIG,SQOOP,OOZIE,ZOOKEEPER,FALCON,STORM,FLUME)",
   "Versions" : {
     "parent_stack_version" : "2.0.6",
     "stack_name" : "HDP",
@@ -562,85 +562,6 @@
       "dependencies" : [ ]
     } ]
   }, {
-    "href" : "/api/v1/stacks/HDP/versions/2.1/services/NAGIOS",
-    "StackServices" : {
-      "service_name" : "NAGIOS",
-      "service_version" : "3.5.0",
-      "stack_name" : "HDP",
-      "stack_version" : "2.1"
-    },
-    "components" : [ {
-      "href" : "/api/v1/stacks/HDP/versions/2.1/services/NAGIOS/components/NAGIOS_SERVER",
-      "StackServiceComponents" : {
-        "cardinality" : "1",
-        "component_category" : "MASTER",
-        "component_name" : "NAGIOS_SERVER",
-        "custom_commands" : [ ],
-        "display_name" : "Nagios Server",
-        "is_client" : false,
-        "is_master" : true,
-        "service_name" : "NAGIOS",
-        "stack_name" : "HDP",
-        "stack_version" : "2.1",
-        "hostnames" : [ ]
-      },
-      "dependencies" : [ {
-        "href" : "/api/v1/stacks/HDP/versions/2.1/services/NAGIOS/components/NAGIOS_SERVER/dependencies/HCAT",
-        "Dependencies" : {
-          "component_name" : "HCAT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.1"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.1/services/NAGIOS/components/NAGIOS_SERVER/dependencies/HDFS_CLIENT",
-        "Dependencies" : {
-          "component_name" : "HDFS_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.1"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.1/services/NAGIOS/components/NAGIOS_SERVER/dependencies/MAPREDUCE2_CLIENT",
-        "Dependencies" : {
-          "component_name" : "MAPREDUCE2_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.1"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.1/services/NAGIOS/components/NAGIOS_SERVER/dependencies/OOZIE_CLIENT",
-        "Dependencies" : {
-          "component_name" : "OOZIE_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.1"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.1/services/NAGIOS/components/NAGIOS_SERVER/dependencies/TEZ_CLIENT",
-        "Dependencies" : {
-          "component_name" : "TEZ_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.1"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.1/services/NAGIOS/components/NAGIOS_SERVER/dependencies/YARN_CLIENT",
-        "Dependencies" : {
-          "component_name" : "YARN_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.1"
-        }
-      } ]
-    } ]
-  }, {
     "href" : "/api/v1/stacks/HDP/versions/2.1/services/OOZIE",
     "StackServices" : {
       "service_name" : "OOZIE",

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
index 1dd7211..64b3170 100644
--- a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
@@ -115,7 +115,7 @@ class TestHDP21StackAdvisor(TestCase):
     hosts = json.load(open(os.path.join(self.testDirectory, 'hosts.json')))
 
     expected_layout = [
-      [u'NAMENODE', u'NAGIOS_SERVER', u'GANGLIA_SERVER', u'ZOOKEEPER_SERVER', u'DRPC_SERVER', u'NIMBUS', u'STORM_REST_API', u'STORM_UI_SERVER', u'MYSQL_SERVER'],
+      [u'NAMENODE', u'GANGLIA_SERVER', u'ZOOKEEPER_SERVER', u'DRPC_SERVER', u'NIMBUS', u'STORM_REST_API', u'STORM_UI_SERVER', u'MYSQL_SERVER'],
       [u'SECONDARY_NAMENODE', u'HISTORYSERVER', u'APP_TIMELINE_SERVER', u'RESOURCEMANAGER', u'ZOOKEEPER_SERVER'],
       [u'HIVE_METASTORE', u'HIVE_SERVER', u'WEBHCAT_SERVER', u'HBASE_MASTER', u'OOZIE_SERVER', u'ZOOKEEPER_SERVER', u'FALCON_SERVER']
     ]

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.1/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default.json b/ambari-server/src/test/python/stacks/2.1/configs/default.json
index 3ff06a5..f291201 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default.json
@@ -273,7 +273,6 @@
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
             "zk_log_dir": "/var/log/zookeeper",
             "hive_aux_jars_path": "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar",
-            "nagios_web_password": "asd",
             "tickTime": "2000",
             "hive_ambari_database": "MySQL",
             "falcon_user": "falcon",
@@ -305,15 +304,12 @@
             "user_group": "hadoop",
             "yarn_user": "yarn",
             "gmond_user": "nobody",
-            "nagios_web_login": "nagiosadmin",
             "storm_user": "storm",
-            "nagios_contact": "asd@asd.asd",
             "hive_database": "New MySQL Database",
             "storm_log_dir": "/var/log/storm",
             "clientPort": "2181",
             "oozie_derby_database": "Derby",
             "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce",
-            "nagios_group": "nagios",
             "hdfs_user": "hdfs",
             "hbase_user": "hbase",
             "oozie_database_type": "derby",
@@ -330,7 +326,6 @@
             "hcat_user": "hcat",
             "hadoop_heapsize": "1024",
             "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "nagios_user": "nagios",
             "hbase_log_dir": "/var/log/hbase",
             "falcon_user": "falcon",
             "falcon_port": "15000",
@@ -582,14 +577,6 @@
             "hbase_regionserver_xmn_ratio": "0.2",
             "hbase_log_dir": "/var/log/hbase"
         },
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "!`\"' 1",
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua"
-        }, 
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
@@ -853,9 +840,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "all_ping_ports": [
             "8670", 
             "8670"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.1/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured.json b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
index be88df4..3eedb77 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
@@ -564,16 +564,6 @@
             "hbase_log_dir": "/var/log/hbase",
             "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab"
         },
-        "nagios-env": {
-            "hive_metastore_user_passwd": "password", 
-            "nagios_web_password": "!`\"' 1",
-            "nagios_user": "nagios", 
-            "nagios_group": "nagios", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "user@com.ua",
-            "nagios_principal_name": "nagios/c6402.ambari.apache.org@EXAMPLE.COM",
-            "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab"
-        }, 
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
@@ -834,9 +824,6 @@
             "c6401.ambari.apache.org", 
             "c6402.ambari.apache.org"
         ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
         "all_ping_ports": [
             "8670", 
             "8670"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.2/common/1/services.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/1/services.json b/ambari-server/src/test/python/stacks/2.2/common/1/services.json
index c47f139..5815a9f 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/1/services.json
+++ b/ambari-server/src/test/python/stacks/2.2/common/1/services.json
@@ -1,5 +1,5 @@
 {
-  "href" : "/api/v1/stacks/HDP/versions/2.2?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy&services/StackServices/service_name.in(HDFS,MAPREDUCE2,YARN,TEZ,NAGIOS,GANGLIA,HIVE,HBASE,PIG,SQOOP,OOZIE,ZOOKEEPER,FALCON,STORM,FLUME,SLIDER,KNOX,KAFKA)",
+  "href" : "/api/v1/stacks/HDP/versions/2.2?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy&services/StackServices/service_name.in(HDFS,MAPREDUCE2,YARN,TEZ,GANGLIA,HIVE,HBASE,PIG,SQOOP,OOZIE,ZOOKEEPER,FALCON,STORM,FLUME,SLIDER,KNOX,KAFKA)",
   "Versions" : {
     "parent_stack_version" : "2.1",
     "stack_name" : "HDP",
@@ -621,85 +621,6 @@
       "dependencies" : [ ]
     } ]
   }, {
-    "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS",
-    "StackServices" : {
-      "service_name" : "NAGIOS",
-      "service_version" : "3.5.0",
-      "stack_name" : "HDP",
-      "stack_version" : "2.2"
-    },
-    "components" : [ {
-      "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER",
-      "StackServiceComponents" : {
-        "cardinality" : "1",
-        "component_category" : "MASTER",
-        "component_name" : "NAGIOS_SERVER",
-        "custom_commands" : [ ],
-        "display_name" : "Nagios Server",
-        "is_client" : false,
-        "is_master" : true,
-        "service_name" : "NAGIOS",
-        "stack_name" : "HDP",
-        "stack_version" : "2.2",
-        "hostnames" : [ ]
-      },
-      "dependencies" : [ {
-        "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER/dependencies/HCAT",
-        "Dependencies" : {
-          "component_name" : "HCAT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.2"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER/dependencies/HDFS_CLIENT",
-        "Dependencies" : {
-          "component_name" : "HDFS_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.2"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER/dependencies/MAPREDUCE2_CLIENT",
-        "Dependencies" : {
-          "component_name" : "MAPREDUCE2_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.2"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER/dependencies/OOZIE_CLIENT",
-        "Dependencies" : {
-          "component_name" : "OOZIE_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.2"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER/dependencies/TEZ_CLIENT",
-        "Dependencies" : {
-          "component_name" : "TEZ_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.2"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER/dependencies/YARN_CLIENT",
-        "Dependencies" : {
-          "component_name" : "YARN_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.2"
-        }
-      } ]
-    } ]
-  }, {
     "href" : "/api/v1/stacks/HDP/versions/2.2/services/OOZIE",
     "StackServices" : {
       "service_name" : "OOZIE",

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/python/stacks/2.2/common/2/services.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/2/services.json b/ambari-server/src/test/python/stacks/2.2/common/2/services.json
index c47f139..6e6678e 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/2/services.json
+++ b/ambari-server/src/test/python/stacks/2.2/common/2/services.json
@@ -1,5 +1,5 @@
 {
-  "href" : "/api/v1/stacks/HDP/versions/2.2?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy&services/StackServices/service_name.in(HDFS,MAPREDUCE2,YARN,TEZ,NAGIOS,GANGLIA,HIVE,HBASE,PIG,SQOOP,OOZIE,ZOOKEEPER,FALCON,STORM,FLUME,SLIDER,KNOX,KAFKA)",
+  "href" : "/api/v1/stacks/HDP/versions/2.2?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy&services/StackServices/service_name.in(HDFS,MAPREDUCE2,YARN,TEZ,GANGLIA,HIVE,HBASE,PIG,SQOOP,OOZIE,ZOOKEEPER,FALCON,STORM,FLUME,SLIDER,KNOX,KAFKA)",
   "Versions" : {
     "parent_stack_version" : "2.1",
     "stack_name" : "HDP",
@@ -620,86 +620,8 @@
       },
       "dependencies" : [ ]
     } ]
-  }, {
-    "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS",
-    "StackServices" : {
-      "service_name" : "NAGIOS",
-      "service_version" : "3.5.0",
-      "stack_name" : "HDP",
-      "stack_version" : "2.2"
-    },
-    "components" : [ {
-      "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER",
-      "StackServiceComponents" : {
-        "cardinality" : "1",
-        "component_category" : "MASTER",
-        "component_name" : "NAGIOS_SERVER",
-        "custom_commands" : [ ],
-        "display_name" : "Nagios Server",
-        "is_client" : false,
-        "is_master" : true,
-        "service_name" : "NAGIOS",
-        "stack_name" : "HDP",
-        "stack_version" : "2.2",
-        "hostnames" : [ ]
-      },
-      "dependencies" : [ {
-        "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER/dependencies/HCAT",
-        "Dependencies" : {
-          "component_name" : "HCAT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.2"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER/dependencies/HDFS_CLIENT",
-        "Dependencies" : {
-          "component_name" : "HDFS_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.2"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER/dependencies/MAPREDUCE2_CLIENT",
-        "Dependencies" : {
-          "component_name" : "MAPREDUCE2_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.2"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER/dependencies/OOZIE_CLIENT",
-        "Dependencies" : {
-          "component_name" : "OOZIE_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.2"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER/dependencies/TEZ_CLIENT",
-        "Dependencies" : {
-          "component_name" : "TEZ_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.2"
-        }
-      }, {
-        "href" : "/api/v1/stacks/HDP/versions/2.2/services/NAGIOS/components/NAGIOS_SERVER/dependencies/YARN_CLIENT",
-        "Dependencies" : {
-          "component_name" : "YARN_CLIENT",
-          "dependent_component_name" : "NAGIOS_SERVER",
-          "dependent_service_name" : "NAGIOS",
-          "stack_name" : "HDP",
-          "stack_version" : "2.2"
-        }
-      } ]
-    } ]
-  }, {
+  },
+  {
     "href" : "/api/v1/stacks/HDP/versions/2.2/services/OOZIE",
     "StackServices" : {
       "service_name" : "OOZIE",

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/TestAmbaryServer.samples/multinode-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/TestAmbaryServer.samples/multinode-default.json b/ambari-server/src/test/resources/TestAmbaryServer.samples/multinode-default.json
index ce88922..a9a44da 100644
--- a/ambari-server/src/test/resources/TestAmbaryServer.samples/multinode-default.json
+++ b/ambari-server/src/test/resources/TestAmbaryServer.samples/multinode-default.json
@@ -1 +1 @@
-{"host_groups":[{"name":"master_1","components":[{"name":"NAMENODE"},{"name":"ZOOKEEPER_SERVER"},{"name":"HBASE_MASTER"},{"name":"GANGLIA_SERVER"},{"name":"HDFS_CLIENT"},{"name":"YARN_CLIENT"},{"name":"HCAT"},{"name":"GANGLIA_MONITOR"}],"cardinality":"1"},{"name":"master_2","components":[{"name":"ZOOKEEPER_CLIENT"},{"name":"HISTORYSERVER"},{"name":"HIVE_SERVER"},{"name":"SECONDARY_NAMENODE"},{"name":"HIVE_METASTORE"},{"name":"HDFS_CLIENT"},{"name":"HIVE_CLIENT"},{"name":"YARN_CLIENT"},{"name":"MYSQL_SERVER"},{"name":"GANGLIA_MONITOR"},{"name":"WEBHCAT_SERVER"}],"cardinality":"1"},{"name":"master_3","components":[{"name":"RESOURCEMANAGER"},{"name":"ZOOKEEPER_SERVER"},{"name":"GANGLIA_MONITOR"}],"cardinality":"1"},{"name":"master_4","components":[{"name":"OOZIE_SERVER"},{"name":"ZOOKEEPER_SERVER"},{"name":"GANGLIA_MONITOR"}],"cardinality":"1"},{"name":"slave","components":[{"name":"HBASE_REGIONSERVER"},{"name":"NODEMANAGER"},{"name":"DATANODE"},{"name":"GANGLIA_MONITOR"}],"cardinality
 ":"${slavesCount}"},{"name":"gateway","components":[{"name":"AMBARI_SERVER"},{"name":"NAGIOS_SERVER"},{"name":"GANGLIA_SERVER"},{"name":"ZOOKEEPER_CLIENT"},{"name":"PIG"},{"name":"OOZIE_CLIENT"},{"name":"HBASE_CLIENT"},{"name":"HCAT"},{"name":"SQOOP"},{"name":"HDFS_CLIENT"},{"name":"HIVE_CLIENT"},{"name":"YARN_CLIENT"},{"name":"MAPREDUCE2_CLIENT"},{"name":"GANGLIA_MONITOR"}],"cardinality":"1"}],"Blueprints":{"blueprint_name":"blueprint-multinode-default","stack_name":"HDP","stack_version":"2.1"}}
\ No newline at end of file
+{"host_groups":[{"name":"master_1","components":[{"name":"NAMENODE"},{"name":"ZOOKEEPER_SERVER"},{"name":"HBASE_MASTER"},{"name":"GANGLIA_SERVER"},{"name":"HDFS_CLIENT"},{"name":"YARN_CLIENT"},{"name":"HCAT"},{"name":"GANGLIA_MONITOR"}],"cardinality":"1"},{"name":"master_2","components":[{"name":"ZOOKEEPER_CLIENT"},{"name":"HISTORYSERVER"},{"name":"HIVE_SERVER"},{"name":"SECONDARY_NAMENODE"},{"name":"HIVE_METASTORE"},{"name":"HDFS_CLIENT"},{"name":"HIVE_CLIENT"},{"name":"YARN_CLIENT"},{"name":"MYSQL_SERVER"},{"name":"GANGLIA_MONITOR"},{"name":"WEBHCAT_SERVER"}],"cardinality":"1"},{"name":"master_3","components":[{"name":"RESOURCEMANAGER"},{"name":"ZOOKEEPER_SERVER"},{"name":"GANGLIA_MONITOR"}],"cardinality":"1"},{"name":"master_4","components":[{"name":"OOZIE_SERVER"},{"name":"ZOOKEEPER_SERVER"},{"name":"GANGLIA_MONITOR"}],"cardinality":"1"},{"name":"slave","components":[{"name":"HBASE_REGIONSERVER"},{"name":"NODEMANAGER"},{"name":"DATANODE"},{"name":"GANGLIA_MONITOR"}],"cardinality
 ":"${slavesCount}"},{"name":"gateway","components":[{"name":"AMBARI_SERVER"},{"name":"GANGLIA_SERVER"},{"name":"ZOOKEEPER_CLIENT"},{"name":"PIG"},{"name":"OOZIE_CLIENT"},{"name":"HBASE_CLIENT"},{"name":"HCAT"},{"name":"SQOOP"},{"name":"HDFS_CLIENT"},{"name":"HIVE_CLIENT"},{"name":"YARN_CLIENT"},{"name":"MAPREDUCE2_CLIENT"},{"name":"GANGLIA_MONITOR"}],"cardinality":"1"}],"Blueprints":{"blueprint_name":"blueprint-multinode-default","stack_name":"HDP","stack_version":"2.1"}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/test/resources/api_testscripts/curl-addnagios.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/api_testscripts/curl-addnagios.sh b/ambari-server/src/test/resources/api_testscripts/curl-addnagios.sh
deleted file mode 100644
index 59e56eb..0000000
--- a/ambari-server/src/test/resources/api_testscripts/curl-addnagios.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/NAGIOS
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/NAGIOS/components/NAGIOS_SERVER
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/NAGIOS_SERVER
-curl -i -X POST -d '{"type": "nagios-global", "tag": "version1", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password", "nagios_contact": "a\u0040b.c" }}' http://localhost:8080/api/clusters/c1/configurations
-curl -i -X PUT -d '{"config": {"nagios-global": "version1" }}'  http://localhost:8080/api/clusters/c1/services/NAGIOS
-curl -i -X PUT  -d '{"ServiceInfo": {"state" : "INSTALLED"}}' http://localhost:8080/api/clusters/c1/services/NAGIOS/
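
The deleted script above captures the generic Ambari REST sequence for adding any service: create the service, create its component, map the component to a host, attach configuration, then drive the service to INSTALLED. A minimal Python 2 sketch of the same sequence, mirroring the deleted script's endpoints; the cluster name, the MYSERVICE placeholders, and the absence of authentication are illustrative, not a recommended setup:

    import json
    import urllib2

    BASE = 'http://localhost:8080/api/clusters/c1'

    def ambari_call(method, path, body=None):
      # urllib2 infers GET/POST from the presence of data; override
      # get_method so PUT can be issued as well.
      req = urllib2.Request(BASE + path,
                            data=json.dumps(body) if body is not None else None)
      req.get_method = lambda: method
      return urllib2.urlopen(req)

    # Register the service and its component, map the component to a host,
    # then ask Ambari to drive the service to the INSTALLED state.
    ambari_call('POST', '/services/MYSERVICE')
    ambari_call('POST', '/services/MYSERVICE/components/MYSERVICE_SERVER')
    ambari_call('POST', '/hosts/localhost.localdomain/host_components/MYSERVICE_SERVER')
    ambari_call('PUT', '/services/MYSERVICE',
                {'ServiceInfo': {'state': 'INSTALLED'}})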


http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/mm_wrapper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/mm_wrapper.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/mm_wrapper.py
deleted file mode 100644
index 8923e6c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/mm_wrapper.py
+++ /dev/null
@@ -1,335 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-import sys
-import subprocess
-import os
-
-N_SGN = 'NAGIOS_SERVICEGROUPNAME'
-N_SD = 'NAGIOS__SERVICEHOST_COMPONENT'
-N_HOST = 'NAGIOS_HOSTNAME'
-
-LIST_SEPARATOR = "--"
-HOSTNAME_PLACEHOLDER = "^^"
-IGNORE_DAT_FILE = "/var/nagios/ignore.dat"
-
-# Mode constants
-OR = 0
-AND = 1
-ENV_ONLY = 2
-FILTER_MM = 3
-LEGACY_CHECK_WRAPPER = 4
-MODES = ['or', 'and', 'env_only', 'filter_mm', 'legacy_check_wrapper']
-
-
-def ignored_host_list(service, component):
-  """
-  :param service: current service
-  :param component: current component
-  :return: all hosts where the specified host component is in the ignored state
-  """
-  def str_norm(s):
-    return s.strip().upper()
-
-  result = []
-
-  try:
-    with open(IGNORE_DAT_FILE, 'r') as f:
-      lines = filter(None, f.read().split(os.linesep))
-  except IOError:
-    return result
-
-  if lines:
-    for l in lines:
-      tokens = l.split(' ')
-      if len(tokens) == 3 and str_norm(tokens[1]) == str_norm(service)\
-                          and str_norm(tokens[2]) == str_norm(component):
-        result.append(tokens[0])
-  return result
-
-
-def get_real_service():
-  try:
-    service = os.environ[N_SGN].strip().upper()  # e.g. 'HBASE'
-  except KeyError:
-    service = ''
-  return service
-
-
-def get_real_component():
-  try:
-    comp_name = os.environ[N_SD].strip()
-  except KeyError:
-    comp_name = ''
-  mapping = {
-    'HBASEMASTER': 'HBASE_MASTER',
-    'REGIONSERVER': 'HBASE_REGIONSERVER',
-    'JOBHISTORY': 'HISTORYSERVER',
-    'HIVE-METASTORE': 'HIVE_METASTORE',
-    'HIVE-SERVER': 'HIVE_SERVER',
-    'FLUME': 'FLUME_HANDLER',
-    'HUE': 'HUE_SERVER',
-    'WEBHCAT': 'WEBHCAT_SERVER',
-  }
-  if comp_name in mapping:
-    comp_name = mapping.get(comp_name)
-  return comp_name
-
-
-def check_output(*popenargs, **kwargs):
-  """
-  Imitate subprocess.check_output() for python 2.6
-  """
-  process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                             *popenargs, **kwargs)
-  output, unused_err = process.communicate()
-  retcode = process.poll()
-  if retcode:
-    cmd = kwargs.get("args")
-    if cmd is None:
-      cmd = popenargs[0]
-    err = subprocess.CalledProcessError(retcode, cmd)
-    # Monkey-patching for python 2.6
-    err.output = output
-    raise err
-  return output
-
-
-def print_usage():
-  """
-  Prints usage and exits with a non-zero exit code
-  """
-  print "Usage: mm_wrapper.py MODE HOST1 HOST2 .. HOSTN %s command arg1 arg2 .. argN" % LIST_SEPARATOR
-  print "MODE is one of the following: or, and, env_only, filter_mm, legacy_check_wrapper"
-  print "%s is a separator between list of hostnames and command with args" % LIST_SEPARATOR
-  print "%s is used as a hostname placeholder at command args" % HOSTNAME_PLACEHOLDER
-  print "Also script provides $MM_HOSTS shell variable to commands"
-  print "NOTE: Script makes use of Nagios-populated env vars %s and %s" % (N_SGN, N_SD)
-  print "For more info, please see docstrings at %s" % os.path.realpath(__file__)
-  sys.exit(1)
-
-
-def parse_args(args):
-  # ToDo: re-organize params parsing, possibly use standard python class for that?
-  if not args or not LIST_SEPARATOR in args or args[0] not in MODES:
-    print_usage()
-  else:
-    mode = MODES.index(args[0])  # identify operation mode
-    args = args[1:]  # Shift args left
-    hostnames = []
-    command_line = []
-    # Parse command line args
-    passed_separator = False  # True if met LIST_SEPARATOR
-    for arg in args:
-      if not passed_separator:
-        if arg != LIST_SEPARATOR:
-          # check if a list of hosts was passed instead of a single host
-          if ',' in arg:
-            hostnames += arg.split(',')
-          else:
-            hostnames.append(arg)
-        else:
-          passed_separator = True
-      else:
-        if arg != LIST_SEPARATOR:
-          command_line.append(arg)
-        else:  # Something definitely went wrong
-          print "Could not parse arguments: " \
-                "There is more than one %s argument." % LIST_SEPARATOR
-          print_usage()
-
-    if not command_line:
-      print "No command provided."
-      print_usage()
-    return mode, hostnames, command_line
-
-
-def do_work(mode, hostnames, command_line):
-  # Execute commands
-  ignored_hosts = ignored_host_list(get_real_service(), get_real_component())
-  empty_check_result = {
-    'message': 'No checks have been run (no hostnames provided)',
-    'retcode': -1,
-    'real_retcode': None
-  }
-  custom_env = os.environ.copy()
-  if ignored_hosts:
-    custom_env['MM_HOSTS'] = \
-      reduce(lambda a, b: "%s %s" % (a, b), ignored_hosts)
-  if mode == OR:
-    check_result = work_in_or_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result)
-  elif mode == AND:
-    check_result = work_in_and_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result)
-  elif mode == ENV_ONLY:
-    check_result = work_in_env_only_mode(hostnames, command_line, custom_env)
-  elif mode == FILTER_MM:
-    check_result = work_in_filter_mm_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result)
-  else:  # mode == LEGACY_CHECK_WRAPPER:
-    check_result = work_in_legacy_check_wrapper_mode(ignored_hosts, command_line, custom_env)
-  # Build the final output
-  final_output = []
-  output = check_result.get('message')
-  if output is not None:
-    for string in output.splitlines():
-      final_output.append(string.strip())
-  real_retcode = check_result.get('real_retcode')
-  if real_retcode:
-    # This string is used at check_aggregate.php when aggregating alerts
-    final_output.append("AMBARIPASSIVE=%s" % real_retcode)
-  return final_output, check_result.get('retcode')
-
-
-def work_in_or_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result):
-  check_result = empty_check_result
-  for hostname in hostnames:
-    concrete_command_line = map(  # Substitute hostname where needed
-                                  lambda x: hostname if x == HOSTNAME_PLACEHOLDER else x,
-                                  command_line)
-    try:
-      returncode = 0
-      real_retcode = None
-      message = check_output(concrete_command_line, env=custom_env)
-    except subprocess.CalledProcessError, e:
-      if hostname not in ignored_hosts:
-        returncode = e.returncode
-      else:  # Host is in MM
-        real_retcode = e.returncode
-      message = e.output
-    really_positive_result = hostname not in ignored_hosts and returncode == 0
-    if check_result.get('retcode') <= returncode or really_positive_result:
-      check_result = {
-        'message': message,
-        'retcode': returncode,
-        'real_retcode': real_retcode  # Real (not suppressed) program retcode
-      }
-    if really_positive_result:
-      break  # Exit on first real success
-  return check_result
-
-
-def work_in_and_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result):
-  check_result = empty_check_result
-  for hostname in hostnames:
-    concrete_command_line = map(  # Substitute hostname where needed
-                                  lambda x: hostname if x == HOSTNAME_PLACEHOLDER else x,
-                                  command_line)
-    try:
-      returncode = 0
-      real_retcode = None
-      message = check_output(concrete_command_line, env=custom_env)
-    except subprocess.CalledProcessError, e:
-      if hostname not in ignored_hosts:
-        returncode = e.returncode
-      else:
-        real_retcode = e.returncode
-      message = e.output
-    if check_result.get('retcode') <= returncode:
-      check_result = {
-        'message': message,
-        'retcode': returncode,
-        'real_retcode': real_retcode  # Real (not suppressed) program retcode
-      }
-  return check_result
-
-
-def work_in_env_only_mode(hostnames, command_line, custom_env):
-  concrete_command_line = []
-  for item in command_line:
-    if item == HOSTNAME_PLACEHOLDER:
-      concrete_command_line.extend(hostnames)
-    else:
-      concrete_command_line.append(item)
-  try:
-    returncode = 0
-    message = check_output(concrete_command_line, env=custom_env)
-  except subprocess.CalledProcessError, e:
-    returncode = e.returncode
-    message = e.output
-  check_result = {
-    'message': message,
-    'retcode': returncode,
-    'real_retcode': None  # Real (not suppressed) program retcode
-  }
-  return check_result
-
-
-def work_in_filter_mm_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result):
-  not_mm_hosts = [hostname for hostname in hostnames if hostname not in ignored_hosts]
-  if not not_mm_hosts:  # All hosts have been filtered
-    return empty_check_result
-  else:
-    return work_in_env_only_mode(not_mm_hosts, command_line, custom_env)
-
-
-def work_in_legacy_check_wrapper_mode(ignored_hosts, command_line, custom_env):
-  host = os.environ[N_HOST]
-  result = work_in_env_only_mode([host], command_line, custom_env)
-  real_retcode = result['retcode']
-  if host in ignored_hosts and real_retcode != 0:  # Ignore fail
-    result['retcode'] = 0
-    result['real_retcode'] = real_retcode
-  return result
-
-
-def main():
-  """
-  This script allows to run nagios service check commands for host components
-  located at different hosts.
-  Also script passes to every command a $MM_HOSTS shell variable with a list of
-  hosts that are in MM
-
-  or mode: return 0 exit code if at least one service check succeeds.
-  Command exits on a first success.
-  Failures for host components that are in MM are suppressed (return code
-  is set to 0).
-  If command fails for all provided hostnames, script returns alert with the
-  greatest exit code value.
-
-  and mode:
-  Perform checks of all host components (effectively ignoring negative results
-  for MM components). If service check is successful for all hosts, script
-  also returns zero exit code. Otherwise alert with the greatest exit code is
-  returned.
-
-  env_only mode:
-  Pass list of all hosts to command and run it once. The only role of
-  mm_wrapper script in this mode is to provide properly initialized
-  $MM_HOSTS env variable to command being run. All duties of ignoring failures
-  of MM host components are delegated to a command being run.
-
-  filter_mm
-  Similar to env_only mode. The only difference is that hostnames for
-  host components that are in MM are filtered (not passed to command at all)
-
-  legacy_check_wrapper
-  Designed as a drop-in replacement for check_wrapper.sh . It reads $NAGIOS_HOSTNAME
-  env var and ignores check results if host component on this host is in MM.
-  When host subtitution symbol is encountered, hostname defined by $NAGIOS_HOSTNAME
-  is substituted,
-  """
-  args = sys.argv[1:]  # Shift args left
-  mode, hostnames, command_line = parse_args(args)
-  output, ret_code = do_work(mode, hostnames, command_line)
-  for line in output:
-    print line
-  sys.exit(ret_code)
-
-
-if __name__ == "__main__":
-  main()
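
To make the deleted wrapper's argument grammar concrete, here is a small sketch using the definitions from the file above; the hostnames and the check command are hypothetical:

    # Comma-separated host lists are split apart, '--' separates the host list
    # from the command, and '^^' is replaced with a hostname at execution time.
    mode, hostnames, command_line = parse_args(
        ['or', 'c6401.example.com,c6402.example.com', '--', 'check_cmd', '-H', '^^'])
    assert mode == OR  # MODES.index('or') == 0
    assert hostnames == ['c6401.example.com', 'c6402.example.com']
    assert command_line == ['check_cmd', '-H', '^^']

    # /var/nagios/ignore.dat drives the maintenance-mode suppression; each line
    # is "<hostname> <SERVICE> <COMPONENT>", e.g. (hypothetical):
    #   c6402.example.com HBASE HBASE_REGIONSERVER
    # A failure suppressed this way is reported as "AMBARIPASSIVE=<retcode>",
    # the marker check_aggregate.php used when aggregating alerts.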

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/nagios_alerts.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/nagios_alerts.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/nagios_alerts.php
deleted file mode 100644
index c7aa517..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/nagios_alerts.php
+++ /dev/null
@@ -1,515 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Constants. */
-define("HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES", "Properties");
-define("HDP_MON_RESPONSE_OPTION_KEY__TYPE", "Type");
-
-define("HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE", "Uncacheable");
-define("HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON", "JSON");
-define("HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT", "JAVASCRIPT");
-
-define("HDP_MON_QUERY_ARG__JSONP", "jsonp");
-
-/** Spits out appropriate response headers, as per the options passed in. */
-function hdp_mon_generate_response_headers( $response_options )
-{
-  if( $response_options[HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES] == HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE )
-  {
-    // Make the response uncache-able.
-    header("Expires: Mon, 26 Jul 1997 05:00:00 GMT"); // Date in the past
-    header("Last-Modified: " . gmdate("D, d M Y H:i:s") . " GMT"); // Always modified
-    header("Cache-Control: no-cache, must-revalidate"); // HTTP/1.1
-    header("Pragma: no-cache"); // HTTP/1.0
-  }
-
-  switch( $response_options[HDP_MON_RESPONSE_OPTION_KEY__TYPE] )
-  {
-    case HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON:
-      {
-        header('Content-type: application/json');
-      }
-      break;
-
-    case HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT:
-      {
-        header('Content-type: application/javascript');
-      }
-      break;
-  }
-}
-
-/** Given $response_data (which we expect to be a JSON string), generate an
- *  HTTP response, which includes emitting the necessary HTTP response headers
- *  followed by the response body (that is either plain ol' $response_data,
- *  or a JSONP wrapper around it).
- */
-function hdp_mon_generate_response( $response_data )
-{
-  $jsonpFunctionName = NULL;
-  if (isset($_GET[HDP_MON_QUERY_ARG__JSONP])) {
-    $jsonpFunctionName = $_GET[HDP_MON_QUERY_ARG__JSONP];
-  }
-
-  hdp_mon_generate_response_headers( array
-  ( HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES => HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE,
-  HDP_MON_RESPONSE_OPTION_KEY__TYPE =>
-  isset( $jsonpFunctionName )  && $jsonpFunctionName != "" ?
-  HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT :
-  HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON ) );
-
-  if( isset( $jsonpFunctionName ) )
-  {
-    echo "$jsonpFunctionName( $response_data );";
-  }
-  else
-  {
-    echo $response_data;
-  }
-}
-
-  /* alert_type { ok, non-ok, warning, critical, all } */
-  define ("all", "-2");
-  define ("nok", "-1");
-  define ("ok", "0");
-  define ("warn", "1");
-  define ("critical", "2");
-
-  define ("HDFS_SERVICE_CHECK", "NAMENODE::NameNode process down");
-  define ("MAPREDUCE_SERVICE_CHECK", "JOBTRACKER::JobTracker process down");
-  define ("HBASE_SERVICE_CHECK", "HBASEMASTER::HBaseMaster process down");
-  define ("ZOOKEEPER_SERVICE_CHECK", "ZOOKEEPER::Percent ZooKeeper Servers down");
-  define ("HIVE_SERVICE_CHECK", "HIVE-METASTORE::Hive Metastore status check");
-  define ("OOZIE_SERVICE_CHECK", "OOZIE::Oozie Server status check");
-  define ("WEBHCAT_SERVICE_CHECK", "WEBHCAT::WebHCat Server status check");
-  define ("PUPPET_SERVICE_CHECK", "PUPPET::Puppet agent down");
-
-  // on SUSE, some versions of Nagios stored data in /var/lib
-  $status_file = "/var/nagios/status.dat";
-  if (!file_exists($status_file) && file_exists("/etc/SuSE-release")) {
-    $status_file = "/var/lib/nagios/status.dat";
-  }
-  
-  $q1="";
-  if (array_key_exists('q1', $_GET)) {
-    $q1=$_GET["q1"];
-  }
-  $q2="";
-  if (array_key_exists('q2', $_GET)) {
-    $q2=$_GET["q2"];
-  }
-  $alert_type="";
-  if (array_key_exists('alert_type', $_GET)) {
-    $alert_type=$_GET["alert_type"];
-  }
-  $host="";
-  if (array_key_exists('host_name', $_GET)) {
-    $host=$_GET["host_name"];
-  }
-  $indent="";
-  if (array_key_exists('indent', $_GET)) {
-    $indent=$_GET["indent"];
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  if ($q1 == "alerts") {
-    /* Add the service status object to result array */
-    $result['alerts'] = query_alerts ($status_file_content, $alert_type, $host);
-  }
-
-  if ($q2 == "hosts") {
-    /* Add the service status object to result array */
-    $result['hosts'] = query_hosts ($status_file_content, $alert_type, $host);
-  }
-
-  /* Add host count object to the results */
-  $result['hostcounts'] = query_host_count ($status_file_content);
-
-  /* Add services runtime states */
-  $result['servicestates'] = query_service_states ($status_file_content);
-
-  /* Return results */
-  if ($indent == "true") {
-    hdp_mon_generate_response(indent(json_encode($result)));
-  } else {
-    hdp_mon_generate_response(json_encode($result));
-  }
-
-  # Functions
-  /* Query service states */
-  function query_service_states ($status_file_content) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $services_object = array ();
-    $services_object["PUPPET"] = 0;
-    foreach ($matches[0] as $object) {
-
-      if (getParameter($object, "service_description") == HDFS_SERVICE_CHECK) {
-        $services_object["HDFS"] = getParameter($object, "last_hard_state");
-        if ($services_object["HDFS"] >= 1) {
-          $services_object["HDFS"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == MAPREDUCE_SERVICE_CHECK) {
-        $services_object["MAPREDUCE"] = getParameter($object, "last_hard_state");
-        if ($services_object["MAPREDUCE"] >= 1) {
-          $services_object["MAPREDUCE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == HBASE_SERVICE_CHECK) {
-        $services_object["HBASE"] = getParameter($object, "last_hard_state");
-        if ($services_object["HBASE"] >= 1) {
-          $services_object["HBASE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == HIVE_SERVICE_CHECK) {
-        $services_object["HIVE"] = getParameter($object, "last_hard_state");
-        if ($services_object["HIVE"] >= 1) {
-          $services_object["HIVE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == OOZIE_SERVICE_CHECK) {
-        $services_object["OOZIE"] = getParameter($object, "last_hard_state");
-        if ($services_object["OOZIE"] >= 1) {
-          $services_object["OOZIE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == WEBHCAT_SERVICE_CHECK) {
-        $services_object["WEBHCAT"] = getParameter($object, "last_hard_state");
-        if ($services_object["WEBHCAT"] >= 1) {
-          $services_object["WEBHCAT"] = 1;
-        }
-        continue;
-      }
-      /* In the case of ZooKeeper, the service is treated as running if the alert
-       * is ok or warning (i.e. at least some ZooKeeper instances are running).
-       */
-      if (getParameter($object, "service_description") == ZOOKEEPER_SERVICE_CHECK) {
-        $services_object["ZOOKEEPER"] = getParameter($object, "last_hard_state");
-        if ($services_object["ZOOKEEPER"] <= 1) {
-          $services_object["ZOOKEEPER"] = 0;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == PUPPET_SERVICE_CHECK) {
-        $state = getParameter($object, "last_hard_state");
-        if ($state >= 1) {
-          $services_object["PUPPET"]++;
-        }
-        continue;
-      }
-    }
-    if ($services_object["PUPPET"] >= 1) {
-      $services_object["PUPPET"] = 1;
-    }
-    $services_object = array_map('strval', $services_object);
-    return $services_object;
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $up_hosts = 0;
-    $down_hosts = 0;
-
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "last_hard_state") != ok) {
-        $down_hosts++;
-      } else {
-        $up_hosts++;
-      }
-    }
-    $hostcounts_object['up_hosts'] = $up_hosts;
-    $hostcounts_object['down_hosts'] = $down_hosts;
-    $hostcounts_object = array_map('strval', $hostcounts_object);
-    return $hostcounts_object;
-  }
-
-  /* Query Hosts */
-  function query_hosts ($status_file_content, $alert_type, $host) {
-    $hoststatus_attributes = array ("host_name", "current_state", "last_hard_state",
-                              "plugin_output", "last_check", "current_attempt",
-                              "last_hard_state_change", "last_time_up", "last_time_down",
-                              "last_time_unreachable", "is_flapping", "last_check");
-
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hosts_objects = array ();
-    $i = 0;
-    foreach ($matches[0] as $object) {
-      $hoststatus = array ();
-      $chost = getParameter($object, "host_name");
-      if (empty($host) || $chost == $host) {
-        foreach ($hoststatus_attributes as $attrib) {
-          $hoststatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
-        }
-        $hoststatus['alerts'] = query_alerts ($status_file_content, $alert_type, $chost);
-        if (!empty($host)) {
-          $hosts_objects[$i] = $hoststatus;
-          $i++;
-          break;
-        }
-      }
-      if (!empty($hoststatus)) {
-        $hosts_objects[$i] = $hoststatus;
-        $i++;
-      }
-    }
-    /* echo "COUNT : " . count ($services_objects) . "\n"; */
-    return $hosts_objects;
-  }
-
-  /* Query Alerts */
-  function query_alerts ($status_file_content, $alert_type, $host) {
-
-    $servicestatus_attributes = array ("service_description", "host_name", "current_attempt",
-                                       "current_state", "plugin_output", "last_hard_state_change", "last_hard_state",
-                                       "last_time_ok", "last_time_warning", "last_time_unknown",
-                                       "last_time_critical", "is_flapping", "last_check",
-                                       "long_plugin_output");
-
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    #echo $matches[0][0] . ", " . $matches[0][1] . "\n";
-    #echo $matches[1][0] . ", " . $matches[1][1] . "\n";
-    $services_objects = array ();
-    $i = 0;
-    foreach ($matches[1] as $object) {      
-      $servicestatus = getParameterMap($object, $servicestatus_attributes);
-      switch ($alert_type) {
-      case "all":
-        if (empty($host) || $servicestatus['host_name'] == $host) {
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        } else {
-          // a specific host was requested and this record is for another one
-          $servicestatus = array ();
-        }
-        break;
-      case "nok":
-        if (getParameterMapValue($map, "last_hard_state") != ok &&
-           (empty($host) || getParameterMapValue($map, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameterMapValue($map, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "ok":
-        if (getParameterMapValue($map, "last_hard_state") == ok &&
-           (empty($host) || getParameterMapValue($map, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameterMapValue($map, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "warn":
-        if (getParameterMapValue($map, "last_hard_state") == warn &&
-           (empty($host) || getParameterMapValue($map, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameterMapValue($map, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "critical":
-        if (getParameterMapValue($map, "last_hard_state") == critical &&
-           (empty($host) || getParameterMapValue($map, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameterMapValue($map, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      }
-      
-      if (!empty($servicestatus)) {
-        $services_objects[$i] = $servicestatus;
-        $i++;
-      }
-    }
-
-    // echo "COUNT : " . count ($services_objects) . "\n";
-    return $services_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "DATANODE":
-      case "NAMENODE":
-      case "JOURNALNODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-      case "TASKTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-      case "REGIONSERVER":
-        $pieces[0] = "HBASE";
-        break;
-      case "HIVE-METASTORE":
-      case "HIVE-SERVER":
-      case "WEBHCAT":
-        $pieces[0] = "HIVE";
-        break;
-      case "ZKSERVERS":
-        $pieces[0] = "ZOOKEEPER";
-        break;
-      case "AMBARI":
-        $pieces[0] = "AMBARI";
-        break;
-      case "FLUME":
-        $pieces[0] = "FLUME";
-        break;
-      case "JOBHISTORY":
-        $pieces[0] = "MAPREDUCE2";
-        break;
-      case "RESOURCEMANAGER":
-      case "APP_TIMELINE_SERVER":
-      case "NODEMANAGER":
-        $pieces[0] = "YARN";
-        break;
-      case "STORM_UI_SERVER":
-      case "NIMBUS":
-      case "DRPC_SERVER":
-      case "SUPERVISOR":
-      case "STORM_REST_API":
-        $pieces[0] = "STORM";
-        break;
-      case "NAGIOS":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-      case "ZOOKEEPER":
-      case "OOZIE":
-      case "GANGLIA":
-      case "STORM":
-      case "FALCON":
-      case "KNOX":
-      case "KAFKA":
-      case "PUPPET":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-  function getParameterMapValue($map, $key) {
-    $value = $map[$key];
-
-    if (!is_null($value))
-      return "" . $value;
-
-    return "";
-  }
-
-
-  function getParameterMap($object, $keynames) {
-
-    $cnt = preg_match_all('/\t([\S]*)=[\n]?[\t]?([\S= ]*)/', $object, $matches, PREG_PATTERN_ORDER);
-
-    $tmpmap = array_combine($matches[1], $matches[2]);
-
-    $map = array();
-    foreach ($keynames as $key) {
-      $map[$key] = htmlentities($tmpmap[$key], ENT_COMPAT);
-    }
-
-    return $map;
-  }
-  
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i = 0; $i < $strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-?>

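The page above works by regex-scraping Nagios' status.dat into key/value maps.
A minimal Python sketch of the same block parsing, for reference only: the
status.dat path and the convention that last_hard_state "0" means OK are
assumptions carried over from the PHP.

    import re

    def parse_status_blocks(status_text, block_type):
        """Return one {key: value} dict per 'block_type { ... }' block."""
        blocks = re.findall(r"%s \{([\S\s]*?)\}" % block_type, status_text)
        parsed = []
        for body in blocks:
            entry = {}
            for line in body.splitlines():
                line = line.strip()
                if "=" in line:
                    key, _, value = line.partition("=")
                    entry[key.strip()] = value.strip()
            parsed.append(entry)
        return parsed

    if __name__ == "__main__":
        with open("/var/nagios/status.dat") as f:  # path is an assumption
            text = f.read()
        hosts = parse_status_blocks(text, "hoststatus")
        down = [h["host_name"] for h in hosts if h.get("last_hard_state") != "0"]
        print("down hosts: %s" % ", ".join(down))
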
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/sys_logger.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/sys_logger.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/sys_logger.py
deleted file mode 100644
index 6683342..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/sys_logger.py
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/usr/bin/python
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import sys
-import syslog
-
-# dictionary of state->severity mappings
-severities = {'UP':'OK', 'DOWN':'Critical', 'UNREACHABLE':'Critical', 'OK':'OK',
-              'WARNING':'Warning', 'UNKNOWN':'Warning', 'CRITICAL':'Critical'}
-
-# List of services which can result in events at the Degraded severity
-degraded_alert_services = ['HBASEMASTER::HBaseMaster CPU utilization',
-                           'HDFS::Namenode RPC Latency',
-                           'MAPREDUCE::JobTracker RPC Latency',
-                           'JOBTRACKER::Jobtracker CPU utilization']
-
-# List of services which can result in events at the Fatal severity
-fatal_alert_services = ['NAMENODE::Namenode Process down',
-                        'NAMENODE::NameNode process']
-
-# dictionary of service->msg_id mappings
-msg_ids = {'Host::Ping':'host_down',
-           'HBASEMASTER::HBaseMaster CPU utilization':'master_cpu_utilization',
-           'HDFS::HDFS Capacity utilization':'hdfs_percent_capacity',
-           'HDFS::Corrupt/Missing blocks':'hdfs_block',
-           'NAMENODE::Namenode Edit logs directory status':'namenode_edit_log_write',
-           'HDFS::Percent DataNodes down':'datanode_down',
-           'DATANODE::Process down':'datanode_process_down',
-           'HDFS::Percent DataNodes storage full':'datanodes_percent_storage_full',
-           'NAMENODE::Namenode Process down':'namenode_process_down',
-           'HDFS::Namenode RPC Latency':'namenode_rpc_latency',
-           'DATANODE::Storage full':'datanodes_storage_full',
-           'JOBTRACKER::Jobtracker Process down':'jobtracker_process_down',
-           'MAPREDUCE::JobTracker RPC Latency':'jobtracker_rpc_latency',
-           'MAPREDUCE::Percent TaskTrackers down':'tasktrackers_down',
-           'TASKTRACKER::Process down':'tasktracker_process_down',
-           'HBASEMASTER::HBaseMaster Process down':'hbasemaster_process_down',
-           'REGIONSERVER::Process down':'regionserver_process_down',
-           'HBASE::Percent region servers down':'regionservers_down',
-           'HIVE-METASTORE::HIVE-METASTORE status check':'hive_metastore_process_down',
-           'ZOOKEEPER::Percent zookeeper servers down':'zookeepers_down',
-           'ZKSERVERS::ZKSERVERS Process down':'zookeeper_process_down',
-           'OOZIE::Oozie status check':'oozie_down',
-           'TEMPLETON::Templeton status check':'templeton_down',
-           'PUPPET::Puppet agent down':'puppet_down',
-           'NAGIOS::Nagios status log staleness':'nagios_status_log_stale',
-           'GANGLIA::Ganglia [gmetad] Process down':'ganglia_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for jobtracker':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for namenode':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for slaves':'ganglia_collector_process_down',
-           'NAMENODE::Secondary Namenode Process down':'secondary_namenode_process_down',
-           'JOBTRACKER::Jobtracker CPU utilization':'jobtracker_cpu_utilization',
-           'HBASEMASTER::HBase Web UI down':'hbase_ui_down',
-           'NAMENODE::Namenode Web UI down':'namenode_ui_down',
-           'JOBTRACKER::JobHistory Web UI down':'jobhistory_ui_down',
-           'JOBTRACKER::JobTracker Web UI down':'jobtracker_ui_down',
-
-           'HBASEMASTER::HBase Master CPU utilization':'master_cpu_utilization',
-           'HDFS::HDFS capacity utilization':'hdfs_percent_capacity',
-           'NAMENODE::NameNode edit logs directory status':'namenode_edit_log_write',
-           'DATANODE::DataNode process down':'datanode_process_down',
-           'NAMENODE::NameNode process down':'namenode_process_down',
-           'HDFS::NameNode RPC latency':'namenode_rpc_latency',
-           'DATANODE::DataNode storage full':'datanodes_storage_full',
-           'JOBTRACKER::JobTracker process down':'jobtracker_process_down',
-           'MAPREDUCE::JobTracker RPC latency':'jobtracker_rpc_latency',
-           'TASKTRACKER::TaskTracker process down':'tasktracker_process_down',
-           'HBASEMASTER::HBase Master process down':'hbasemaster_process_down',
-           'REGIONSERVER::RegionServer process down':'regionserver_process_down',
-           'HBASE::Percent RegionServers down':'regionservers_down',
-           'HIVE-METASTORE::Hive Metastore status check':'hive_metastore_process_down',
-           'HIVE-METASTORE::Hive Metastore process':'hive_metastore_process_down',
-           'ZOOKEEPER::Percent ZooKeeper Servers down':'zookeepers_down',
-           'ZOOKEEPER::ZooKeeper Server process down':'zookeeper_process_down',
-           'OOZIE::Oozie Server status check':'oozie_down',
-           'WEBHCAT::WebHCat Server status check':'templeton_down',
-           'GANGLIA::Ganglia [gmetad] process down':'ganglia_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for HBase Master':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for JobTracker':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for NameNode':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for slaves':'ganglia_collector_process_down',
-           'NAMENODE::Secondary NameNode process down':'secondary_namenode_process_down',
-           'JOBTRACKER::JobTracker CPU utilization':'jobtracker_cpu_utilization',
-           'HBASEMASTER::HBase Master Web UI down':'hbase_ui_down',
-           'NAMENODE::NameNode Web UI down':'namenode_ui_down',
-           'Oozie status check':'oozie_down',
-           'WEBHCAT::WebHcat status check':'templeton_down',
-
-           # Ambari Nagios service check descriptions
-           'DATANODE::DataNode process':'datanode_process',
-           'NAMENODE::NameNode process':'namenode_process',
-           'NAMENODE::Secondary NameNode process':'secondary_namenode_process',
-           'JOURNALNODE::JournalNode process':'journalnode_process',
-           'ZOOKEEPER::ZooKeeper Server process':'zookeeper_process_down',
-           'JOBTRACKER::JobTracker process':'jobtracker_process',
-           'TASKTRACKER::TaskTracker process':'tasktracker_process',
-           'GANGLIA::Ganglia Server process':'ganglia_server_process',
-           'GANGLIA::Ganglia Monitor process for Slaves':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for NameNode':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for JobTracker':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for HBase Master':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for ResourceManager':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for HistoryServer':'ganglia_monitor_process',
-           'HBASEMASTER::HBase Master process':'hbase_master_process',
-           'HBASE::Percent RegionServers live':'regionservers_down',
-           'REGIONSERVER::RegionServer process':'regionserver_process',
-           'NAGIOS::Nagios status log freshness':'nagios_process',
-           'FLUME::Flume Agent process':'flume_agent_process',
-           'OOZIE::Oozie Server status':'oozie_down',
-           'HIVE-METASTORE::Hive Metastore status':'hive_metastore_process',
-           'WEBHCAT::WebHCat Server status':'webhcat_down',
-           'RESOURCEMANAGER::ResourceManager process':'resourcemanager_process_down',
-           'RESOURCEMANAGER::ResourceManager RPC latency':'resourcemanager_rpc_latency',
-           'RESOURCEMANAGER::ResourceManager CPU utilization':'resourcemanager_cpu_utilization',
-           'RESOURCEMANAGER::ResourceManager Web UI':'resourcemanager_ui',
-           'NODEMANAGER::NodeManager process':'nodemanager_process_down',
-           'NODEMANAGER::NodeManager health':'nodemanager_health',
-           'NODEMANAGER::Percent NodeManagers live':'nodemanagers_down',
-           'APP_TIMELINE_SERVER::App Timeline Server process':'timelineserver_process',
-           'JOBHISTORY::HistoryServer RPC latency':'historyserver_rpc_latency',
-           'JOBHISTORY::HistoryServer CPU utilization':'historyserver_cpu_utilization',
-           'JOBHISTORY::HistoryServer Web UI':'historyserver_ui',
-           'JOBHISTORY::HistoryServer process':'historyserver_process'}
-
-# Determine the severity of the TVI alert based on the Nagios alert state.
-def determine_severity(state, service):
-    if severities.has_key(state):
-        severity = severities[state]
-    else: severity = 'Warning'
-
-    # For some alerts, warning should be converted to Degraded
-    if severity == 'Warning' and service in degraded_alert_services:
-        severity = 'Degraded'
-    elif severity != 'OK' and service in fatal_alert_services:
-        severity = 'Fatal'
-
-    return severity
-
-
-# Determine the msg id for the TVI alert based on the service that generated
-# the Nagios alert.  The msg id is used to correlate a log msg to a TVI rule.
-def determine_msg_id(service, severity):
-    for k, v in msg_ids.iteritems():
-        if k in service:
-            msg_id = v
-            if severity == 'OK':
-                msg_id = '{0}_ok'.format(msg_id)
-            return msg_id
-    return 'HADOOP_UNKNOWN_MSG'
-
-
-# Determine the domain.  Currently the domain is always 'Hadoop'.
-def determine_domain():
-    return 'Hadoop'
-
-
-# log the TVI msg to the syslog
-def log_tvi_msg(msg):
-    syslog.openlog('nagios', syslog.LOG_PID)
-    syslog.syslog(msg)
-
-
-# generate a tvi log msg from a Hadoop alert
-def generate_tvi_log_msg(alert_type, attempt, state, service, msg):
-    # Determine the TVI msg contents
-    severity = determine_severity(state, service)  # The TVI alert severity.
-    domain   = determine_domain()                  # The domain specified in the TVI alert.
-    msg_id   = determine_msg_id(service, severity) # The msg_id used to correlate to a TVI rule.
-
-    # Only log HARD alerts
-    if alert_type == 'HARD':
-        # Format and log msg
-        log_tvi_msg('{0}: {1}: {2}# {3}'.format(severity, domain, msg_id, msg))
-
-
-# main method which is called when invoked on the command line
-def main():
-    generate_tvi_log_msg(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
-
-
-# run the main method
-if __name__ == '__main__':
-    main()
-    sys.exit(0)
\ No newline at end of file

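The severity and msg_id tables drive the whole script above. A condensed
sketch of the state-to-severity resolution (dictionaries excerpted from the
file, the Degraded branch omitted; the inputs shown are illustrative):

    severities = {'UP': 'OK', 'DOWN': 'Critical', 'UNREACHABLE': 'Critical',
                  'OK': 'OK', 'WARNING': 'Warning', 'UNKNOWN': 'Warning',
                  'CRITICAL': 'Critical'}
    fatal_alert_services = ['NAMENODE::Namenode Process down',
                            'NAMENODE::NameNode process']

    def severity_for(state, service):
        severity = severities.get(state, 'Warning')
        # non-OK events on services in the fatal list are escalated
        if severity != 'OK' and service in fatal_alert_services:
            severity = 'Fatal'
        return severity

    # A HARD CRITICAL on the NameNode process becomes a Fatal TVI event:
    print(severity_for('CRITICAL', 'NAMENODE::NameNode process'))  # Fatal
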
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/functions.py
deleted file mode 100644
index 7252f8f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/functions.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-# Returns True if the JDK found under java64_home is newer than Java 6
-def is_jdk_greater_6(java64_home):
-  import os
-  import re
-  java_bin = os.path.join(java64_home, 'bin', 'java')
-  ver_check = shell.call([java_bin, '-version'])
-
-  ver = ''
-  if 0 != ver_check[0]:
-    # java is not local, try the home name as a fallback
-    ver = java64_home
-  else:
-    ver = ver_check[1]
-
-  regex = re.compile('"1\.([0-9]*)\.0_([0-9]*)"', re.IGNORECASE)
-  r = regex.search(ver)
-  if r:
-    strs = r.groups()
-    if 2 == len(strs):
-      minor = int(strs[0])
-      if minor > 6:
-        return True
-
-  return False

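The version sniffing above keys off the quoted version string that
`java -version` prints. A standalone sketch of the same regex in action (the
sample output is illustrative):

    import re

    sample = 'java version "1.7.0_45"'
    match = re.search(r'"1\.([0-9]*)\.0_([0-9]*)"', sample)
    if match:
        minor, update = (int(g) for g in match.groups())
        print(minor > 6)  # True: 1.7.0_45 is newer than JDK 6
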
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios.py
deleted file mode 100644
index a63ea38..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios.py
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from nagios_server_config import nagios_server_config
-
-def nagios():
-  import params
-
-  File( params.nagios_httpd_config_file,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    content = Template("nagios.conf.j2"),
-    mode   = 0644
-  )
-  
-  Directory( params.conf_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-
-  Directory( [params.plugins_dir, params.nagios_obj_dir])
-
-  Directory( params.nagios_pid_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755,
-    recursive = True
-  )
-
-  Directory( [params.nagios_var_dir, params.check_result_path, params.nagios_rw_dir, params.ambarinagios_php_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    recursive = True
-  )
-  
-  Directory( [params.nagios_log_dir, params.nagios_log_archives_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755
-  )
-
-  nagios_server_config()
-
-  set_web_permissions()
-
-  File( format("{conf_dir}/command.cfg"),
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-
-  File( format("{ambarinagios_php_dir}/{ambarinagios_php_filename}"),
-    content = StaticFile(params.ambarinagios_php_filename),
-  )
-
-  File( params.hdp_mon_nagios_addons_path,
-    content = StaticFile("hdp_mon_nagios_addons.conf"),
-  )
-
-  File(format("{nagios_var_dir}/ignore.dat"),
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0664)
-  
-  if System.get_instance().os_family == "ubuntu":
-    Link(params.ubuntu_stylesheets_desired_location,
-         to = params.ubuntu_stylesheets_real_location
-    )
-  
-  
-def set_web_permissions():
-  import params
-
-  cmd = format("{htpasswd_cmd} -c -b  {conf_dir}/htpasswd.users {nagios_web_login} {nagios_web_password!p}")
-  Execute(cmd)
-
-  File( format("{conf_dir}/htpasswd.users"),
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode  = 0640
-  )
-
-  if System.get_instance().os_family == "suse":
-    command = format("usermod -G {nagios_group} wwwrun")
-  elif System.get_instance().os_family == "ubuntu":
-    command = format("usermod -G {nagios_group} www-data") # check -a ???
-  elif System.get_instance().os_family == "redhat":
-    command = format("usermod -a -G {nagios_group} apache")
-  
-  Execute( command)

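The web-permissions step has two halves: generate htpasswd credentials for the
Nagios UI, then put the web server's account into the Nagios group so Apache
can read Nagios state. A sketch of the per-OS account selection (account names
taken from the branches above; -a behavior noted in the TODO there):

    # Which web server account must join the Nagios group, per OS family.
    WEB_USER_BY_OS = {"suse": "wwwrun", "ubuntu": "www-data", "redhat": "apache"}

    def usermod_command(os_family, nagios_group="nagios"):
        web_user = WEB_USER_BY_OS[os_family]
        # -a appends to supplementary groups instead of replacing them
        return "usermod -a -G %s %s" % (nagios_group, web_user)

    print(usermod_command("redhat"))  # usermod -a -G nagios apache
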
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server.py
deleted file mode 100644
index da35b34..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from nagios import nagios
-from nagios_service import nagios_service
-from nagios_service import update_active_alerts
-
-         
-class NagiosServer(Script):
-  def install(self, env):
-    remove_conflicting_packages()
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    nagios()
-
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    update_ignorable(params)
-
-    self.configure(env) # done for updating configs after Security enabled
-    nagios_service(action='start')
-
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    
-    nagios_service(action='stop')
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nagios_pid_file)
-
-    # check for alert structures
-    update_active_alerts()
-
-    
-def remove_conflicting_packages():  
-  Package('hdp_mon_nagios_addons', action = "remove")
-
-  Package('nagios-plugins', action = "remove")
-  
-  if System.get_instance().os_family in ["redhat","suse"]:
-    Execute("rpm -e --allmatches --nopostun nagios",
-      path  = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-      ignore_failures = True)
-
-def update_ignorable(params):
-  if not params.config.has_key('passiveInfo'):
-    return
-  else:
-    buf = ""
-    count = 0
-    for define in params.config['passiveInfo']:
-      try:
-        host = str(define['host'])
-        service = str(define['service'])
-        component = str(define['component'])
-        buf += host + " " + service + " " + component + "\n"
-        count += 1
-      except KeyError:
-        pass
-
-    f = None
-    try:
-      f = open('/var/nagios/ignore.dat', 'w')
-      f.write(buf)
-      if 1 == count:
-        Logger.info("Persisted '/var/nagios/ignore.dat' with 1 entry")
-      elif count > 1:
-        Logger.info("Persisted '/var/nagios/ignore.dat' with " + str(count) + " entries")
-    except:
-      Logger.info("Could not persist '/var/nagios/ignore.dat'")
-    finally:
-      if f is not None:
-        f.close()
-
-
-if __name__ == "__main__":
-  NagiosServer().execute()

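update_ignorable() persists one "host service component" triple per passive
alert so checks in maintenance mode can be skipped. A sketch of the file
format it writes (host names and services below are illustrative):

    import sys

    passive_info = [
        {'host': 'c6401.ambari.apache.org', 'service': 'HDFS',
         'component': 'DATANODE'},
        {'host': 'c6402.ambari.apache.org', 'service': 'YARN',
         'component': 'NODEMANAGER'},
    ]
    buf = ""
    for define in passive_info:
        buf += "%s %s %s\n" % (define['host'], define['service'],
                               define['component'])
    sys.stdout.write(buf)  # same payload that goes to /var/nagios/ignore.dat
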
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server_config.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server_config.py
deleted file mode 100644
index 883442c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server_config.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def nagios_server_config():
-  import params
-  
-  nagios_server_configfile( 'nagios.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'resource.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'hadoop-hosts.cfg')
-  nagios_server_configfile( 'hadoop-hostgroups.cfg')
-  nagios_server_configfile( 'hadoop-servicegroups.cfg')
-  nagios_server_configfile( 'hadoop-services.cfg')
-  nagios_server_configfile( 'hadoop-commands.cfg')
-  nagios_server_configfile( 'contacts.cfg')
-  
-  if System.get_instance().os_family != "suse":
-    nagios_server_configfile( 'nagios',
-                              config_dir = '/etc/init.d',
-                              mode = 0755, 
-                              owner = 'root', 
-                              group = 'root'
-    )
-
-  nagios_server_check( 'check_cpu.pl')
-  nagios_server_check( 'check_cpu.php')
-  nagios_server_check( 'check_cpu_ha.php')
-  nagios_server_check( 'check_datanode_storage.php')
-  nagios_server_check( 'check_aggregate.php')
-  nagios_server_check( 'check_hdfs_blocks.php')
-  nagios_server_check( 'check_hdfs_capacity.php')
-  nagios_server_check( 'check_rpcq_latency.php')
-  nagios_server_check( 'check_rpcq_latency_ha.php')
-  nagios_server_check( 'check_webui.sh')
-  nagios_server_check( 'check_webui_ha.sh')
-  nagios_server_check( 'check_name_dir_status.php')
-  nagios_server_check( 'check_oozie_status.sh')
-  nagios_server_check( 'check_templeton_status.sh')
-  nagios_server_check( 'check_hive_metastore_status.sh')
-  nagios_server_check( 'check_hue_status.sh')
-  nagios_server_check( 'check_mapred_local_dir_used.sh')
-  nagios_server_check( 'check_nodemanager_health.sh')
-  nagios_server_check( 'check_namenodes_ha.sh')
-  nagios_server_check( 'hdp_nagios_init.php')
-  nagios_server_check( 'check_checkpoint_time.py' )
-  nagios_server_check( 'sys_logger.py' )
-  nagios_server_check( 'check_ambari_alerts.py' )
-  nagios_server_check( 'mm_wrapper.py' )
-  nagios_server_check( 'check_hive_thrift_port.py' )
-
-def nagios_server_configfile(
-  name,
-  owner = None,
-  group = None,
-  config_dir = None,
-  mode = None
-):
-  import params
-  owner = params.nagios_user if not owner else owner
-  group = params.user_group if not group else group
-  config_dir = params.nagios_obj_dir if not config_dir else config_dir
-  
-  TemplateConfig( format("{config_dir}/{name}"),
-    owner          = owner,
-    group          = group,
-    mode           = mode
-  )
-
-def nagios_server_check(name):
-  File( format("{plugins_dir}/{name}"),
-    content = StaticFile(name), 
-    mode = 0755
-  )

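nagios_server_configfile() applies a simple fall-back pattern for its keyword
arguments: explicit values win, otherwise cluster-wide params are used. A
standalone sketch (the default values shown are illustrative stand-ins for
params.nagios_user, params.user_group and params.nagios_obj_dir):

    def resolve_config_target(owner=None, group=None, config_dir=None):
        # Explicit arguments win; otherwise fall back to cluster-wide params.
        owner = owner or "nagios"
        group = group or "hadoop"
        config_dir = config_dir or "/etc/nagios/objects"
        return owner, group, config_dir

    # The init script is the one target owned by root outside the objects dir:
    print(resolve_config_target(owner="root", group="root",
                                config_dir="/etc/init.d"))
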
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_service.py
deleted file mode 100644
index b7f512b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_service.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import json
-import os
-import signal
-
-from resource_management import *
-from os.path import isfile
-
-
-def nagios_service(action='start'): # start or stop
-  import params
-  
-  nagios_pid_file = format("{nagios_pid_file}")
-
-  if action == 'start':
-    command = format("service {nagios_service_name} start")
-    Execute(command)   
-  elif action == 'stop':
-    # attempt to grab the pid in case we need it later
-    nagios_pid = 0    
-    if isfile(nagios_pid_file):
-      # 'with' closes the file automatically; no explicit close() is needed
-      with open(nagios_pid_file, "r") as pid_file:
-        try:
-          nagios_pid = int(pid_file.read())
-          Logger.info("Nagios is running with a PID of {0}".format(nagios_pid))
-        except:
-          Logger.info("Unable to read PID file {0}".format(nagios_pid_file))
-
-    command = format("service {nagios_service_name} stop")  
-    Execute(command)
-
-    # on SUSE, there is a bug where Nagios doesn't kill the process 
-    # but this could also affect any OS, so don't restrict this to SUSE
-    if nagios_pid > 0:
-      try:
-        os.kill(nagios_pid, 0)
-      except:
-        Logger.info("The Nagios process has successfully terminated")
-      else:
-        Logger.info("The Nagios process with ID {0} failed to terminate; explicitly killing.".format(nagios_pid))
-        os.kill(nagios_pid, signal.SIGKILL)
-
-    # in the event that the Nagios scripts don't remove the pid file
-    if isfile( nagios_pid_file ):   
-      Execute(format("rm -f {nagios_pid_file}"))
-        
-  MonitorWebserver("restart")
-
-def update_active_alerts():
-  import status_params
-
-  alerts = None
-  if 'alerts' in status_params.config and status_params.config['alerts'] is not None:
-    alerts = status_params.config['alerts']
-
-  if alerts is None:
-    return
-
-  output = {}
-
-  for a in alerts:
-    alert_name = a['name']
-    alert_text = a['text']
-    alert_state = a['state']
-    alert_host = a['host']
-    if not output.has_key(alert_name):
-      output[alert_name] = {}
-
-    if not output[alert_name].has_key(alert_host):
-      output[alert_name][alert_host] = []
-
-    host_items = output[alert_name][alert_host]
-    alert_out = {}
-    alert_out['state'] = alert_state
-    alert_out['text'] = alert_text
-    host_items.append(alert_out)
-
-  with open(os.path.join(status_params.nagios_var_dir, 'ambari.json'), 'w') as f:
-    json.dump(output, f)
-

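For clarity, the ambari.json document that update_active_alerts() emits is a
two-level map: alert name, then host, then a list of state/text entries. A
sketch with illustrative alert data:

    import json

    alerts = [{'name': 'datanode_process', 'text': 'Connection refused',
               'state': 'CRITICAL', 'host': 'c6401.ambari.apache.org'}]
    output = {}
    for a in alerts:
        output.setdefault(a['name'], {}).setdefault(a['host'], []).append(
            {'state': a['state'], 'text': a['text']})
    print(json.dumps(output, indent=2))
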
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py
deleted file mode 100644
index 778d830..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py
+++ /dev/null
@@ -1,363 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from functions import is_jdk_greater_6
-from resource_management import *
-import status_params
-
-HADOOP_HTTP_POLICY = "HTTP_ONLY"
-HADOOP_HTTPS_POLICY = "HTTPS_ONLY"
-
-# server configurations
-config = Script.get_config()
-
-if System.get_instance().os_family == "ubuntu":
-  nagios_service_name = "nagios3"
-else:
-  nagios_service_name = "nagios"
-
-conf_dir = format("/etc/{nagios_service_name}")
-nagios_obj_dir = format("{conf_dir}/objects")
-nagios_var_dir = status_params.nagios_var_dir
-nagios_rw_dir = status_params.nagios_rw_dir
-
-# HACK: Stylesheets for Nagios UI on Ubuntu are in wrong place so we have to do a symlink.
-# In future we can fix this directly in the package.
-ubuntu_stylesheets_real_location = "/etc/nagios3/stylesheets"
-ubuntu_stylesheets_desired_location = "/usr/share/nagios3/htdocs/stylesheets"
-
-if System.get_instance().os_family == "ubuntu":
-  host_template = "generic-host"
-  plugins_dir = "/usr/lib/nagios/plugins"
-  nagios_web_dir = "/usr/share/nagios3/htdocs"
-  
-  cfg_files = [
-    format("{conf_dir}/commands.cfg"),
-    format("{conf_dir}/conf.d/contacts_nagios2.cfg"),
-    format("{conf_dir}/conf.d/generic-host_nagios2.cfg"),
-    format("{conf_dir}/conf.d/generic-service_nagios2.cfg"),
-    format("{conf_dir}/conf.d/timeperiods_nagios2.cfg"),
-  ]
-  cgi_dir = "/usr/lib/cgi-bin/nagios3"
-  cgi_weblink = "/cgi-bin/nagios3"
-else:
-  host_template = "linux-server"
-  plugins_dir = "/usr/lib64/nagios/plugins"
-  nagios_web_dir = "/usr/share/nagios"
-  
-  cfg_files = [
-    format("{nagios_obj_dir}/commands.cfg"),
-    format("{nagios_obj_dir}/contacts.cfg"),
-    format("{nagios_obj_dir}/timeperiods.cfg"),
-    format("{nagios_obj_dir}/templates.cfg"),
-  ]
-  
-  cgi_dir = "/usr/lib/nagios/cgi"
-  cgi_weblink = "/nagios/cgi-bin"
-  
-check_result_path = "/var/nagios/spool/checkresults"
-nagios_log_dir = "/var/log/nagios"
-nagios_log_archives_dir = format("{nagios_log_dir}/archives")
-nagios_host_cfg = format("{nagios_obj_dir}/hadoop-hosts.cfg")
-nagios_lookup_daemon_str = "/usr/sbin/nagios"
-nagios_pid_dir = status_params.nagios_pid_dir
-nagios_pid_file = status_params.nagios_pid_file
-nagios_resource_cfg = format("{conf_dir}/resource.cfg")
-nagios_hostgroup_cfg = format("{nagios_obj_dir}/hadoop-hostgroups.cfg")
-nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
-nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
-nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
-eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
-nagios_principal_name = default("/configurations/nagios-env/nagios_principal_name", "nagios")
-
-oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
-namenode_host = default("/clusterHostInfo/namenode_host", None)
-_rm_host = default("/clusterHostInfo/rm_host", None)
-if type(_rm_host) is list:
-  rm_hosts_in_str = ','.join(_rm_host)
-
-has_namenode = namenode_host is not None
-has_rm = _rm_host is not None
-
-# - test for HDFS or HCFS (glusterfs)
-if 'namenode_host' in config['clusterHostInfo']:
-  ishdfs_value = "HDFS"
-else:
-  ishdfs_value = None
-
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', HADOOP_HTTP_POLICY)
-yarn_http_policy = default('/configurations/yarn-site/yarn.http.policy', HADOOP_HTTP_POLICY)
-mapreduce_http_policy = default('/configurations/mapred-site/mapreduce.jobhistory.http.policy', HADOOP_HTTP_POLICY)
-
-hdfs_ssl_enabled = (dfs_http_policy == HADOOP_HTTPS_POLICY)
-yarn_ssl_enabled = (yarn_http_policy == HADOOP_HTTPS_POLICY)
-mapreduce_ssl_enabled = (mapreduce_http_policy == HADOOP_HTTPS_POLICY)
-#
-if has_namenode:
-  if 'dfs.http.policy' in config['configurations']['hdfs-site']:
-    dfs_http_policy = config['configurations']['hdfs-site']['dfs.http.policy']
-  if dfs_http_policy == HADOOP_HTTPS_POLICY:
-    hdfs_ssl_enabled = True
-if has_rm:
-  if 'yarn.http.policy' in config['configurations']['yarn-site']:
-    yarn_http_policy = config['configurations']['yarn-site']['yarn.http.policy']
-
-  if 'mapreduce.jobhistory.http.policy' in config['configurations']['mapred-site']:
-    mapreduce_http_policy = config['configurations']['mapred-site']['mapreduce.jobhistory.http.policy']
-
-if dfs_http_policy == HADOOP_HTTPS_POLICY:
-  hdfs_ssl_enabled = True
-
-if yarn_http_policy == HADOOP_HTTPS_POLICY:
-  yarn_ssl_enabled = True
-
-if mapreduce_http_policy == HADOOP_HTTPS_POLICY:
-  mapreduce_ssl_enabled = True
-
-# set default ports and webui lookup properties
-dfs_namenode_webui_default_port = '50070'
-dfs_snamenode_webui_default_port = '50090'
-yarn_nodemanager_default_port = '8042'
-dfs_namenode_webui_property = 'dfs.namenode.http-address'
-dfs_snamenode_webui_property = 'dfs.namenode.secondary.http-address'
-dfs_datanode_webui_property = 'dfs.datanode.http.address'
-yarn_rm_webui_property = 'yarn.resourcemanager.webapp.address'
-yarn_timeline_service_webui_property = 'yarn.timeline-service.webapp.address'
-yarn_nodemanager_webui_property = 'yarn.nodemanager.webapp.address'
-mapreduce_jobhistory_webui_property = 'mapreduce.jobhistory.webapp.address'
- 
-# if HDFS is protected by SSL, adjust the ports and lookup properties
-if hdfs_ssl_enabled:
-  dfs_namenode_webui_default_port = '50470'
-  dfs_snamenode_webui_default_port = '50091'
-  dfs_namenode_webui_property = 'dfs.namenode.https-address'
-  dfs_snamenode_webui_property = 'dfs.namenode.secondary.https-address'
-  dfs_datanode_webui_property = 'dfs.datanode.https.address'
-
-# if YARN is protected by SSL, adjust the ports and lookup properties  
-if yarn_ssl_enabled:
-  yarn_rm_webui_property = 'yarn.resourcemanager.webapp.https.address'
-  yarn_nodemanager_webui_property = 'yarn.nodemanager.webapp.https.address'  
-  yarn_timeline_service_webui_property = 'yarn.timeline-service.webapp.https.address'
-
-# if MR is protected by SSL, adjust the ports and lookup properties
-if mapreduce_ssl_enabled:
-  mapreduce_jobhistory_webui_property = 'mapreduce.jobhistory.webapp.https.address'
-  
-if has_namenode:
-  # extract NameNode
-  if dfs_namenode_webui_property in config['configurations']['hdfs-site']:
-    namenode_port = get_port_from_url(config['configurations']['hdfs-site'][dfs_namenode_webui_property])
-  else:
-    namenode_port = dfs_namenode_webui_default_port
-
-  # extract Secondary NameNode
-  if dfs_snamenode_webui_property in config['configurations']['hdfs-site']:
-    snamenode_port = get_port_from_url(config['configurations']['hdfs-site'][dfs_snamenode_webui_property])
-  else:
-    snamenode_port = dfs_snamenode_webui_default_port
-
-  if 'dfs.journalnode.http-address' in config['configurations']['hdfs-site']:
-    journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
-    datanode_port = get_port_from_url(config['configurations']['hdfs-site'][dfs_datanode_webui_property])
-
-nm_port = yarn_nodemanager_default_port
-if has_rm:
-  if yarn_nodemanager_webui_property in config['configurations']['yarn-site']:
-    nm_port = get_port_from_url(config['configurations']['yarn-site'][yarn_nodemanager_webui_property])
-  
-flume_port = "4159"
-hbase_master_rpc_port = default('/configurations/hbase-site/hbase.master.port', "60000")
-rm_port = get_port_from_url(config['configurations']['yarn-site'][yarn_rm_webui_property])
-hs_port = get_port_from_url(config['configurations']['mapred-site'][mapreduce_jobhistory_webui_property])
-hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
-hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
-templeton_port = config['configurations']['webhcat-site']['templeton.port'] #"50111"
-hbase_master_port = config['configurations']['hbase-site']['hbase.master.info.port'] #"60010"
-hbase_rs_port = config['configurations']['hbase-site']['hbase.regionserver.info.port'] #"60030"
-storm_ui_port = config['configurations']['storm-site']['ui.port']
-drpc_port = config['configurations']['storm-site']['drpc.port']
-nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
-supervisor_port = "56431"
-storm_rest_api_port = "8745"
-falcon_port = config['configurations']['falcon-env']['falcon_port']
-ahs_port = get_port_from_url(config['configurations']['yarn-site'][yarn_timeline_service_webui_property])
-knox_gateway_port = config['configurations']['gateway-site']['gateway.port']
-kafka_broker_port = config['configurations']['kafka-broker']['port']
-
-# use sensible defaults for checkpoint as they are required by Nagios and 
-# may not be part of hdfs-site.xml on an upgrade
-if has_namenode:
-  if 'dfs.namenode.checkpoint.period' in config['configurations']['hdfs-site']:
-    dfs_namenode_checkpoint_period = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.period']
-  else:
-    dfs_namenode_checkpoint_period = '21600'
-  
-  if 'dfs.namenode.checkpoint.txns' in config['configurations']['hdfs-site']:
-    dfs_namenode_checkpoint_txns = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.txns']
-  else:
-    dfs_namenode_checkpoint_txns = '1000000'
-
-# this is different for HDP1
-nn_metrics_property = "FSNamesystem"
-clientPort = config['configurations']['zoo.cfg']['clientPort'] #ZK
-
-
-java64_home = config['hostLevelParams']['java_home']
-check_cpu_on = is_jdk_greater_6(java64_home)
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-nagios_keytab_path = default("/configurations/nagios-env/nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-nn_ha_host_port_map = {}
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namemodes_ids_list:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    nn_ha_host_port_map[nn_host.split(":")[0]] = nn_host.split(":")[1]
-else:
-  if 'namenode_host' in config['clusterHostInfo']:
-    namenode_metadata_port = get_port_from_url(config['configurations']['core-site']['fs.defaultFS'])
-    nn_ha_host_port_map[config['clusterHostInfo']['namenode_host'][0]] = namenode_metadata_port
-  else:
-    namenode_metadata_port = '8020'
-    
-os_family = System.get_instance().os_family
-
-ganglia_port = "8651"
-ganglia_collector_slaves_port = "8660"
-ganglia_collector_namenode_port = "8661"
-ganglia_collector_jobtracker_port = "8662"
-ganglia_collector_hbase_port = "8663"
-ganglia_collector_rm_port = "8664"
-ganglia_collector_nm_port = "8660"
-ganglia_collector_hs_port = "8666"
-  
-all_ping_ports = config['clusterHostInfo']['all_ping_ports']
-
-if System.get_instance().os_family == "suse":
-  nagios_p1_pl = "/usr/lib/nagios/p1.pl"
-  htpasswd_cmd = "htpasswd2"
-  web_conf_dir = "/etc/apache2/conf.d"
-elif System.get_instance().os_family == "ubuntu":
-  nagios_p1_pl = "/usr/lib/nagios3/p1.pl"
-  htpasswd_cmd = "htpasswd"
-  web_conf_dir = "/etc/apache2/conf.d"
-elif System.get_instance().os_family == "redhat":
-  nagios_p1_pl = "/usr/bin/p1.pl"
-  htpasswd_cmd = "htpasswd"
-  web_conf_dir = "/etc/httpd/conf.d"
-
-nagios_httpd_config_file = format("{web_conf_dir}/{nagios_service_name}.conf")
-hdp_mon_nagios_addons_path = format("{web_conf_dir}/hdp_mon_nagios_addons.conf")
-
-ambarinagios_php_dir = "/usr/share/hdp/nagios/"
-ambarinagios_php_filename = "nagios_alerts.php"
-
-nagios_user = config['configurations']['nagios-env']['nagios_user']
-nagios_group = config['configurations']['nagios-env']['nagios_group']
-nagios_web_login = config['configurations']['nagios-env']['nagios_web_login']
-nagios_web_password = config['configurations']['nagios-env']['nagios_web_password']
-user_group = config['configurations']['cluster-env']['user_group']
-nagios_contact = config['configurations']['nagios-env']['nagios_contact']
-
-
-_snamenode_host = default("/clusterHostInfo/snamenode_host", None)
-_jtnode_host = default("/clusterHostInfo/jtnode_host", None)
-_slave_hosts = default("/clusterHostInfo/slave_hosts", None)
-_journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", None)
-_zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", None)
-_rm_host = default("/clusterHostInfo/rm_host", None)
-if type(_rm_host) is list:
-  rm_hosts_in_str = ','.join(_rm_host)
-_nm_hosts = default("/clusterHostInfo/nm_hosts", None)
-_hs_host = default("/clusterHostInfo/hs_host", None)
-_zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", None)
-_flume_hosts = default("/clusterHostInfo/flume_hosts", None)
-_nagios_server_host = default("/clusterHostInfo/nagios_server_host",None)
-_ganglia_server_host = default("/clusterHostInfo/ganglia_server_host",None)
-_app_timeline_server_hosts = default("/clusterHostInfo/app_timeline_server_hosts",None)
-_nimbus_host = default("/clusterHostInfo/nimbus_hosts",None)
-_drpc_host = default("/clusterHostInfo/drpc_server_hosts",None)
-_supervisor_hosts = default("/clusterHostInfo/supervisor_hosts",None)
-_storm_ui_host = default("/clusterHostInfo/storm_ui_server_hosts",None)
-_storm_rest_api_hosts = default("/clusterHostInfo/storm_rest_api_hosts",None)
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts",None)
-if type(hbase_master_hosts) is list:
-  hbase_master_hosts_in_str = ','.join(hbase_master_hosts)
-_hive_server_host = default("/clusterHostInfo/hive_server_host",None)
-_oozie_server = default("/clusterHostInfo/oozie_server",None)
-_webhcat_server_host = default("/clusterHostInfo/webhcat_server_host",None)
-_falcon_host = default("/clusterHostInfo/falcon_server_hosts", None)
-# can differ on HDP1
-#_mapred_tt_hosts = _slave_hosts
-#if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
-_hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", _slave_hosts)
-_hue_server_host = default("/clusterHostInfo/hue_server_host", None)
-_knox_gateway_host =  default("/clusterHostInfo/knox_gateway_hosts", None)
-_kafka_broker_host =  default("/clusterHostInfo/kafka_broker_hosts", None)
-all_hosts = config['clusterHostInfo']['all_hosts']
-
-if 'namenode_host' in config['clusterHostInfo']:
-  nn_hosts_string = " ".join(namenode_host)
-else:
-  nn_hosts_string = " ".join(config['clusterHostInfo']['ambari_server_host'])
-
-
-hostgroup_defs = {
-    'namenode' : namenode_host,
-    'snamenode' : _snamenode_host,
-    'slaves' : _slave_hosts,
-    'agent-servers' : all_hosts,
-    'nagios-server' : _nagios_server_host,
-    'jobtracker' : _jtnode_host,
-    'ganglia-server' : _ganglia_server_host,
-    'flume-servers' : _flume_hosts,
-    'zookeeper-servers' : _zookeeper_hosts,
-    'hbasemasters' : hbase_master_hosts,
-    'hiveserver' : _hive_server_host,
-    'region-servers' : _hbase_rs_hosts,
-    'oozie-server' : _oozie_server,
-    'webhcat-server' : _webhcat_server_host,
-    'hue-server' : _hue_server_host,
-    'resourcemanager' : _rm_host,
-    'nodemanagers' : _nm_hosts,
-    'historyserver2' : _hs_host,
-    'journalnodes' : _journalnode_hosts,
-    'nimbus' : _nimbus_host,
-    'drpc-server' : _drpc_host,
-    'storm_ui' : _storm_ui_host,
-    'supervisors' : _supervisor_hosts,
-    'storm_rest_api' : _storm_rest_api_hosts,
-    'falcon-server' : _falcon_host,
-    'ats-servers' : _app_timeline_server_hosts,
-    'knox-gateway' : _knox_gateway_host,
-    'kafka-broker' : _kafka_broker_host
-}

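A large part of params.py is this policy-driven lookup: when a service runs
HTTPS_ONLY, both the property name and the default port for its web UI change.
A sketch of that selection for the NameNode (property names and ports come
from the code above; the host value is illustrative):

    def namenode_webui_port(hdfs_site):
        https = hdfs_site.get('dfs.http.policy') == 'HTTPS_ONLY'
        prop = 'dfs.namenode.https-address' if https else 'dfs.namenode.http-address'
        default_port = '50470' if https else '50070'
        address = hdfs_site.get(prop)
        return address.split(':')[-1] if address else default_port

    print(namenode_webui_port({'dfs.http.policy': 'HTTPS_ONLY',
                               'dfs.namenode.https-address': 'nn.example.com:50470'}))
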
http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/status_params.py
deleted file mode 100644
index 11d4aa9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/status_params.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-nagios_pid_dir = "/var/run/nagios"
-nagios_pid_file = format("{nagios_pid_dir}/nagios.pid")
-
-nagios_var_dir = "/var/nagios"
-nagios_rw_dir = "/var/nagios/rw"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/contacts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/contacts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/contacts.cfg.j2
deleted file mode 100644
index 610b2bd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/contacts.cfg.j2
+++ /dev/null
@@ -1,109 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-###############################################################################
-# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
-#
-# Last Modified: 05-31-2007
-#
-# NOTES: This config file provides you with some example contact and contact
-#        group definitions that you can reference in host and service
-#        definitions.
-#       
-#        You don't need to keep these definitions in a separate file from your
-#        other object definitions.  This has been done just to make things
-#        easier to understand.
-#
-###############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-
-###############################################################################
-###############################################################################
-#
-# CONTACTS
-#
-###############################################################################
-###############################################################################
-
-# Just one contact defined by default - the Nagios admin (that's you)
-# This contact definition inherits a lot of default values from the 'generic-contact' 
-# template which is defined elsewhere.
-
-define contact{
-        contact_name    {{nagios_web_login}}                                        ; Short name of user
-        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
-        alias           Nagios Admin                                                ; Full name of user
-
-        email           {{nagios_contact}}	; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
-        }
-
-# Contact which writes all Nagios alerts to the system logger.
-define contact{
-        contact_name                    sys_logger         ; Short name of user
-        use                             generic-contact    ; Inherit default values from generic-contact template (defined above)
-        alias                           System Logger      ; Full name of user
-        host_notifications_enabled      1
-        service_notifications_enabled   1
-        service_notification_period     24x7
-        host_notification_period        24x7
-        service_notification_options    w,u,c,r,s
-        host_notification_options       d,u,r,s
-        can_submit_commands             1
-        retain_status_information       1
-        service_notification_commands   service_sys_logger
-        host_notification_commands      host_sys_logger
-        }
-
-###############################################################################
-###############################################################################
-#
-# CONTACT GROUPS
-#
-###############################################################################
-###############################################################################
-
-# We only have one contact in this simple configuration file, so there is
-# no need to create more than one contact group.
-
-define contactgroup {
-        contactgroup_name       admins
-        alias                   Nagios Administrators
-        members                 {{nagios_web_login}},sys_logger
-}
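
The notification_options strings in the sys_logger contact above are Nagios state filters: for services, w,u,c,r,s selects WARNING, UNKNOWN, CRITICAL, recovery, and scheduled-downtime events; for hosts, d,u,r,s selects DOWN, UNREACHABLE, recovery, and downtime. An illustrative decoder:

    # Illustrative decoder for the notification_options flags used above.
    SERVICE_FLAGS = {'w': 'WARNING', 'u': 'UNKNOWN', 'c': 'CRITICAL',
                     'r': 'recovery (OK)', 'f': 'flapping', 's': 'scheduled downtime'}
    HOST_FLAGS = {'d': 'DOWN', 'u': 'UNREACHABLE', 'r': 'recovery (UP)',
                  'f': 'flapping', 's': 'scheduled downtime'}

    def decode(options, table):
        return [table[flag] for flag in options.split(',')]

    print(decode('w,u,c,r,s', SERVICE_FLAGS))
    print(decode('d,u,r,s', HOST_FLAGS))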


http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios_service.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios_service.py
deleted file mode 100644
index b7f512b..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/nagios_service.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import json
-import os
-import signal
-
-from resource_management import *
-from os.path import isfile
-
-
-def nagios_service(action='start'): # start or stop
-  import params
-  
-  nagios_pid_file = format("{nagios_pid_file}")
-
-  if action == 'start':
-    command = format("service {nagios_service_name} start")
-    Execute(command)   
-  elif action == 'stop':
-    # attempt to grab the pid in case we need it later
-    nagios_pid = 0    
-    if isfile(nagios_pid_file):   
-      with open(nagios_pid_file, "r") as file:
-        try:
-          nagios_pid = int(file.read())
-          Logger.info("Nagios is running with a PID of {0}".format(nagios_pid))
-        except:
-          Logger.info("Unable to read PID file {0}".format(nagios_pid_file))
-        finally:
-          file.close()
-
-    command = format("service {nagios_service_name} stop")  
-    Execute(command)
-
-    # on SUSE, there is a bug where Nagios doesn't kill the process 
-    # but this could also affect any OS, so don't restrict this to SUSE
-    if nagios_pid > 0:
-      try:
-        os.kill(nagios_pid, 0)
-      except:
-        Logger.info("The Nagios process has successfully terminated")
-      else:
-        Logger.info("The Nagios process with ID {0} failed to terminate; explicitly killing.".format(nagios_pid))
-        os.kill(nagios_pid, signal.SIGKILL)
-
-    # in the event that the Nagios scripts don't remove the pid file
-    if isfile( nagios_pid_file ):   
-      Execute(format("rm -f {nagios_pid_file}"))
-        
-  MonitorWebserver("restart")
-
-def update_active_alerts():
-  import status_params
-
-  alerts = None
-  if 'alerts' in status_params.config and status_params.config['alerts'] is not None:
-    alerts = status_params.config['alerts']
-
-  if alerts is None:
-    return
-
-  output = {}
-
-  for a in alerts:
-    alert_name = a['name']
-    alert_text = a['text']
-    alert_state = a['state']
-    alert_host = a['host']
-    if not output.has_key(alert_name):
-      output[alert_name] = {}
-
-    if not output[alert_name].has_key(alert_host):
-      output[alert_name][alert_host] = []
-
-    host_items = output[alert_name][alert_host]
-    alert_out = {}
-    alert_out['state'] = alert_state
-    alert_out['text'] = alert_text
-    host_items.append(alert_out)
-
-  with open(os.path.join(status_params.nagios_var_dir, 'ambari.json'), 'w') as f:
-    json.dump(output, f)
-
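
update_active_alerts() above is the bridge this commit retires: Ambari's own alert states arrive in the command's config, and the script groups them by alert name and then host before dumping the result to ambari.json under nagios_var_dir, presumably for the check_ambari command's check_ambari_alerts.py to read. The same grouping in more current Python (dict.has_key() is Python 2 only) might look like:

    # Hedged sketch of the same grouping using dict.setdefault instead of has_key().
    import json

    def group_alerts(alerts):
        output = {}
        for alert in alerts:
            by_host = output.setdefault(alert['name'], {})
            by_host.setdefault(alert['host'], []).append(
                {'state': alert['state'], 'text': alert['text']})
        return output

    sample = [{'name': 'datanode_process', 'host': 'c6401', 'state': 'CRITICAL', 'text': 'down'}]
    print(json.dumps(group_alerts(sample), indent=2))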

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/params.py
deleted file mode 100644
index ec6c885..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/params.py
+++ /dev/null
@@ -1,287 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from functions import is_jdk_greater_6
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-if System.get_instance().os_family == "ubuntu":
-  nagios_service_name = "nagios3"
-else:
-  nagios_service_name = "nagios"
-
-conf_dir = format("/etc/{nagios_service_name}")
-nagios_obj_dir = format("{conf_dir}/objects")
-nagios_var_dir = status_params.nagios_var_dir
-nagios_rw_dir = status_params.nagios_rw_dir
-
-# HACK: Stylesheets for Nagios UI on Ubuntu are in wrong place so we have to do a symlink.
-# In future we can fix this directly in the package.
-ubuntu_stylesheets_real_location = "/etc/nagios3/stylesheets"
-ubuntu_stylesheets_desired_location = "/usr/share/nagios3/htdocs/stylesheets"
-
-if System.get_instance().os_family == "ubuntu":
-  host_template = "generic-host"
-  plugins_dir = "/usr/lib/nagios/plugins"
-  nagios_web_dir = "/usr/share/nagios3/htdocs"
-  
-  cfg_files = [
-    format("{conf_dir}/commands.cfg"),
-    format("{conf_dir}/conf.d/contacts_nagios2.cfg"),
-    format("{conf_dir}/conf.d/generic-host_nagios2.cfg"),
-    format("{conf_dir}/conf.d/generic-service_nagios2.cfg"),
-    format("{conf_dir}/conf.d/timeperiods_nagios2.cfg"),
-  ]
-  cgi_dir = "/usr/lib/cgi-bin/nagios3"
-  cgi_weblink = "/cgi-bin/nagios3"
-else:
-  host_template = "linux-server"
-  plugins_dir = "/usr/lib64/nagios/plugins"
-  nagios_web_dir = "/usr/share/nagios"
-  
-  cfg_files = [
-    format("{nagios_obj_dir}/commands.cfg"),
-    format("{nagios_obj_dir}/contacts.cfg"),
-    format("{nagios_obj_dir}/timeperiods.cfg"),
-    format("{nagios_obj_dir}/templates.cfg"),
-  ]
-  
-  cgi_dir = "/usr/lib/nagios/cgi"
-  cgi_weblink = "/nagios/cgi-bin"
-  
-check_result_path = "/var/nagios/spool/checkresults"
-nagios_log_dir = "/var/log/nagios"
-nagios_log_archives_dir = format("{nagios_log_dir}/archives")
-nagios_host_cfg = format("{nagios_obj_dir}/hadoop-hosts.cfg")
-nagios_lookup_daemon_str = "/usr/sbin/nagios"
-nagios_pid_dir = status_params.nagios_pid_dir
-nagios_pid_file = status_params.nagios_pid_file
-nagios_resource_cfg = format("{conf_dir}/resource.cfg")
-nagios_hostgroup_cfg = format("{nagios_obj_dir}/hadoop-hostgroups.cfg")
-nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
-nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
-nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
-eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
-nagios_principal_name = default("/configurations/nagios-env/nagios_principal_name", "nagios")
-hadoop_ssl_enabled = False
-
-oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
-namenode_host = default("/clusterHostInfo/namenode_host", None)
-
-# - test for HDFS or HCFS (glusterfs)
-if 'namenode_host' in config['clusterHostInfo']:
-  ishdfs_value = "HDFS"
-else:
-  ishdfs_value = None
-
-has_namenode = not namenode_host == None
-
-# different to HDP1
-if has_namenode:
-  if 'dfs.namenode.http-address' in config['configurations']['hdfs-site']:
-    namenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.http-address'])
-  else:
-    namenode_port = "50070"
-
-  if 'dfs.namenode.secondary.http-address' in config['configurations']['hdfs-site']:
-    snamenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.secondary.http-address'])
-  else:
-    snamenode_port = "50071"
-
-  if 'dfs.journalnode.http-address' in config['configurations']['hdfs-site']:
-    journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
-    datanode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.datanode.http.address'])
-
-hbase_master_rpc_port = default('/configurations/hbase-site/hbase.master.port', "60000")
-rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
-nm_port = "8042"
-hs_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'])
-flume_port = "4159"
-hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
-hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
-templeton_port = config['configurations']['webhcat-site']['templeton.port'] #"50111"
-hbase_master_port = config['configurations']['hbase-site']['hbase.master.info.port'] #"60010"
-hbase_rs_port = config['configurations']['hbase-site']['hbase.regionserver.info.port'] #"60030"
-storm_ui_port = config['configurations']['storm-site']['ui.port']
-drpc_port = config['configurations']['storm-site']['drpc.port']
-nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
-supervisor_port = "56431"
-storm_rest_api_port = "8745"
-falcon_port = config['configurations']['falcon-env']['falcon_port']
-ahs_port = get_port_from_url(config['configurations']['yarn-site']['yarn.timeline-service.webapp.address'])
-
-# use sensible defaults for checkpoint as they are required by Nagios and 
-# may not be part of hdfs-site.xml on an upgrade
-if has_namenode:
-  if 'dfs.namenode.checkpoint.period' in config['configurations']['hdfs-site']:
-    dfs_namenode_checkpoint_period = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.period']
-  else:
-    dfs_namenode_checkpoint_period = '21600'
-  
-  if 'dfs.namenode.checkpoint.txns' in config['configurations']['hdfs-site']:
-    dfs_namenode_checkpoint_txns = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.txns']
-  else:
-    dfs_namenode_checkpoint_txns = '1000000'
-
-# this is different for HDP1
-nn_metrics_property = "FSNamesystem"
-clientPort = config['configurations']['zookeeper-env']['clientPort'] #ZK 
-
-
-java64_home = config['hostLevelParams']['java_home']
-check_cpu_on = is_jdk_greater_6(java64_home)
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-nagios_keytab_path = default("/configurations/nagios-env/nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-nn_ha_host_port_map = {}
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namemodes_ids_list:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    nn_ha_host_port_map[nn_host.split(":")[0]] = nn_host.split(":")[1]
-else:
-  if 'namenode_host' in config['clusterHostInfo']:
-    namenode_metadata_port = get_port_from_url(config['configurations']['core-site']['fs.defaultFS'])
-    nn_ha_host_port_map[config['clusterHostInfo']['namenode_host'][0]] = namenode_metadata_port
-  else:
-    namenode_metadata_port = '8020'
-    
-os_family = System.get_instance().os_family
-
-ganglia_port = "8651"
-ganglia_collector_slaves_port = "8660"
-ganglia_collector_namenode_port = "8661"
-ganglia_collector_jobtracker_port = "8662"
-ganglia_collector_hbase_port = "8663"
-ganglia_collector_rm_port = "8664"
-ganglia_collector_nm_port = "8660"
-ganglia_collector_hs_port = "8666"
-  
-all_ping_ports = config['clusterHostInfo']['all_ping_ports']
-
-if System.get_instance().os_family == "suse":
-  nagios_p1_pl = "/usr/lib/nagios/p1.pl"
-  htpasswd_cmd = "htpasswd2"
-  web_conf_dir = "/etc/apache2/conf.d"
-elif System.get_instance().os_family == "ubuntu":
-  nagios_p1_pl = "/usr/lib/nagios3/p1.pl"
-  htpasswd_cmd = "htpasswd"
-  web_conf_dir = "/etc/apache2/conf.d"
-elif System.get_instance().os_family == "redhat":
-  nagios_p1_pl = "/usr/bin/p1.pl"
-  htpasswd_cmd = "htpasswd"
-  web_conf_dir = "/etc/httpd/conf.d"
-
-nagios_httpd_config_file = format("{web_conf_dir}/{nagios_service_name}.conf")
-hdp_mon_nagios_addons_path = format("{web_conf_dir}/hdp_mon_nagios_addons.conf")
-
-ambarinagios_php_dir = "/usr/share/hdp/nagios/"
-ambarinagios_php_filename = "nagios_alerts.php"
-
-nagios_user = config['configurations']['nagios-env']['nagios_user']
-nagios_group = config['configurations']['nagios-env']['nagios_group']
-nagios_web_login = config['configurations']['nagios-env']['nagios_web_login']
-nagios_web_password = config['configurations']['nagios-env']['nagios_web_password']
-user_group = config['configurations']['cluster-env']['user_group']
-nagios_contact = config['configurations']['nagios-env']['nagios_contact']
-
-
-_snamenode_host = default("/clusterHostInfo/snamenode_host", None)
-_jtnode_host = default("/clusterHostInfo/jtnode_host", None)
-_slave_hosts = default("/clusterHostInfo/slave_hosts", None)
-_journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", None)
-_zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", None)
-_rm_host = default("/clusterHostInfo/rm_host", None)
-if type(_rm_host) is list:
-  rm_hosts_in_str = ','.join(_rm_host)
-_nm_hosts = default("/clusterHostInfo/nm_hosts", None)
-_hs_host = default("/clusterHostInfo/hs_host", None)
-_zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", None)
-_flume_hosts = default("/clusterHostInfo/flume_hosts", None)
-_nagios_server_host = default("/clusterHostInfo/nagios_server_host",None)
-_ganglia_server_host = default("/clusterHostInfo/ganglia_server_host",None)
-_app_timeline_server_hosts = default("/clusterHostInfo/app_timeline_server_hosts",None)
-_nimbus_host = default("/clusterHostInfo/nimbus_hosts",None)
-_drpc_host = default("/clusterHostInfo/drpc_server_hosts",None)
-_supervisor_hosts = default("/clusterHostInfo/supervisor_hosts",None)
-_storm_ui_host = default("/clusterHostInfo/storm_ui_server_hosts",None)
-_storm_rest_api_hosts = default("/clusterHostInfo/storm_rest_api_hosts",None)
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts",None)
-if type(hbase_master_hosts) is list:
-  hbase_master_hosts_in_str = ','.join(hbase_master_hosts)
-_hive_server_host = default("/clusterHostInfo/hive_server_host",None)
-_oozie_server = default("/clusterHostInfo/oozie_server",None)
-_webhcat_server_host = default("/clusterHostInfo/webhcat_server_host",None)
-_falcon_host = default("/clusterHostInfo/falcon_server_hosts", None)
-# can differ on HDP1
-#_mapred_tt_hosts = _slave_hosts
-#if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
-_hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", _slave_hosts)
-_hue_server_host = default("/clusterHostInfo/hue_server_host", None)
-all_hosts = config['clusterHostInfo']['all_hosts']
-
-if 'namenode_host' in config['clusterHostInfo']:
-  nn_hosts_string = " ".join(namenode_host)
-else:
-  nn_hosts_string = " ".join(config['clusterHostInfo']['ambari_server_host'])
-
-
-hostgroup_defs = {
-    'namenode' : namenode_host,
-    'snamenode' : _snamenode_host,
-    'slaves' : _slave_hosts,
-    'agent-servers' : all_hosts,
-    'nagios-server' : _nagios_server_host,
-    'jobtracker' : _jtnode_host,
-    'ganglia-server' : _ganglia_server_host,
-    'flume-servers' : _flume_hosts,
-    'zookeeper-servers' : _zookeeper_hosts,
-    'hbasemasters' : hbase_master_hosts,
-    'hiveserver' : _hive_server_host,
-    'region-servers' : _hbase_rs_hosts,
-    'oozie-server' : _oozie_server,
-    'webhcat-server' : _webhcat_server_host,
-    'hue-server' : _hue_server_host,
-    'resourcemanager' : _rm_host,
-    'nodemanagers' : _nm_hosts,
-    'historyserver2' : _hs_host,
-    'journalnodes' : _journalnode_hosts,
-    'nimbus' : _nimbus_host,
-    'drpc-server' : _drpc_host,
-    'storm_ui' : _storm_ui_host,
-    'supervisors' : _supervisor_hosts,
-    'storm_rest_api' : _storm_rest_api_hosts,
-    'falcon-server' : _falcon_host,
-    'ats-servers' : _app_timeline_server_hosts
-}
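
Most ports in params.py arrive through get_port_from_url(), which is fed either a bare host:port pair (dfs.namenode.http-address) or a full URL (oozie.base.url, hive.metastore.uris). A rough, illustrative stand-in (not the resource_management implementation):

    # Rough, illustrative stand-in for the get_port_from_url() calls above.
    try:
        from urllib.parse import urlparse  # Python 3
    except ImportError:
        from urlparse import urlparse      # Python 2

    def port_from_url(value, fallback=None):
        if '//' not in value:
            value = '//' + value  # let urlparse treat host:port as a netloc
        port = urlparse(value).port
        return str(port) if port else fallback

    print(port_from_url('http://oozie.example.com:11000/oozie'))  # 11000
    print(port_from_url('nn.example.com:50070'))                  # 50070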

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/status_params.py
deleted file mode 100644
index 11d4aa9..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/scripts/status_params.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-nagios_pid_dir = "/var/run/nagios"
-nagios_pid_file = format("{nagios_pid_dir}/nagios.pid")
-
-nagios_var_dir = "/var/nagios"
-nagios_rw_dir = "/var/nagios/rw"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/contacts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/contacts.cfg.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/contacts.cfg.j2
deleted file mode 100644
index 610b2bd..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/contacts.cfg.j2
+++ /dev/null
@@ -1,109 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-###############################################################################
-# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
-#
-# Last Modified: 05-31-2007
-#
-# NOTES: This config file provides you with some example contact and contact
-#        group definitions that you can reference in host and service
-#        definitions.
-#       
-#        You don't need to keep these definitions in a separate file from your
-#        other object definitions.  This has been done just to make things
-#        easier to understand.
-#
-###############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-
-###############################################################################
-###############################################################################
-#
-# CONTACTS
-#
-###############################################################################
-###############################################################################
-
-# Just one contact defined by default - the Nagios admin (that's you)
-# This contact definition inherits a lot of default values from the 'generic-contact' 
-# template which is defined elsewhere.
-
-define contact{
-        contact_name    {{nagios_web_login}}                                        ; Short name of user
-        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
-        alias           Nagios Admin                                                ; Full name of user
-
-        email           {{nagios_contact}}	; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
-        }
-
-# Contact which writes all Nagios alerts to the system logger.
-define contact{
-        contact_name                    sys_logger         ; Short name of user
-        use                             generic-contact    ; Inherit default values from generic-contact template (defined above)
-        alias                           System Logger      ; Full name of user
-        host_notifications_enabled      1
-        service_notifications_enabled   1
-        service_notification_period     24x7
-        host_notification_period        24x7
-        service_notification_options    w,u,c,r,s
-        host_notification_options       d,u,r,s
-        can_submit_commands             1
-        retain_status_information       1
-        service_notification_commands   service_sys_logger
-        host_notification_commands      host_sys_logger
-        }
-
-###############################################################################
-###############################################################################
-#
-# CONTACT GROUPS
-#
-###############################################################################
-###############################################################################
-
-# We only have one contact in this simple configuration file, so there is
-# no need to create more than one contact group.
-
-define contactgroup {
-        contactgroup_name       admins
-        alias                   Nagios Administrators
-        members                 {{nagios_web_login}},sys_logger
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-commands.cfg.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
deleted file mode 100644
index a8a616c..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
+++ /dev/null
@@ -1,166 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-{% if check_cpu_on %}
-# 'check_cpu' check remote cpu load
-define command {
-        command_name    check_cpu
-        command_line    $USER1$/check_wrapper.sh php $USER1$/check_cpu.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -u $ARG8$
-       }
-define command {
-        command_name    check_cpu_ha
-        command_line    $USER1$/check_wrapper.sh php $USER1$/check_cpu_ha.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -u $ARG9$
-       }
-{% endif %}
-
-# Check data node storage full 
-define command {
-        command_name    check_datanode_storage
-        command_line    $USER1$/check_wrapper.sh php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
-       }
-
-define command{
-        command_name    check_hdfs_blocks
-        command_line    $USER1$/check_wrapper.sh php $USER1$/check_hdfs_blocks.php -h $ARG1$ -p $ARG2$ -s $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -u $ARG8$
-       }
-
-define command{
-        command_name    check_hdfs_capacity
-        command_line    $USER1$/check_wrapper.sh php $USER1$/check_hdfs_capacity.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_aggregate
-        command_line    $USER1$/check_wrapper.sh php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
-       }
-
-define command{
-        command_name    check_rpcq_latency
-        command_line    $USER1$/check_wrapper.sh php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_rpcq_latency_ha
-        command_line    $USER1$/check_wrapper.sh php $USER1$/check_rpcq_latency_ha.php -h $ARG1$ -p $ARG3$ -n $ARG2$ -w $ARG4$ -c $ARG5$ -e $ARG6$ -k $ARG7$ -r $ARG8$ -t $ARG9$ -s $ARG10$
-       }
-
-define command{
-        command_name    check_nagios
-        command_line    $USER1$/check_wrapper.sh $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
-       }
-
-define command{
-        command_name    check_webui
-        command_line    $USER1$/check_wrapper.sh $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$
-       }
-
-define command{
-        command_name    check_webui_ha
-        command_line    $USER1$/check_wrapper.sh $USER1$/check_webui_ha.sh $ARG1$ $ARG2$ $ARG3$
-       }
-
-define command{
-        command_name    check_name_dir_status
-        command_line    $USER1$/check_wrapper.sh php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$ -e $ARG2$ -k $ARG3$ -r $ARG4$ -t $ARG5$ -s $ARG6$
-       }
-
-define command{
-        command_name    check_oozie_status
-        command_line    $USER1$/check_wrapper.sh $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_templeton_status
-        command_line    $USER1$/check_wrapper.sh $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_hive_metastore_status
-        command_line    $USER1$/check_wrapper.sh $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-define command{
-        command_name    check_hue_status
-        command_line    $USER1$/check_wrapper.sh $USER1$/check_hue_status.sh
-       }
-
-define command{
-       command_name    check_mapred_local_dir_used_space
-       command_line    $USER1$/check_wrapper.sh $USER1$/check_mapred_local_dir_used.sh $ARG1$ $ARG2$
-       }
-
-define command{
-       command_name    check_namenodes_ha
-       command_line    $USER1$/check_wrapper.sh $USER1$/check_namenodes_ha.sh $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name    check_nodemanager_health
-        command_line    $USER1$/check_wrapper.sh $USER1$/check_nodemanager_health.sh $HOSTADDRESS$ $ARG1$
-       }
-
-define command{
-        command_name    host_sys_logger
-        command_line    $USER1$/sys_logger.py $HOSTSTATETYPE$ $HOSTATTEMPT$ $HOSTSTATE$ "Host::Ping" "Event Host=$HOSTADDRESS$($HOSTSTATE$), $HOSTOUTPUT$ $LONGHOSTOUTPUT$"
-       }
-
-define command{
-        command_name    service_sys_logger
-        command_line    $USER1$/sys_logger.py $SERVICESTATETYPE$ $SERVICEATTEMPT$ $SERVICESTATE$ "$SERVICEDESC$" "Event Host=$HOSTADDRESS$ Service Description=$SERVICEDESC$($SERVICESTATE$), $SERVICEOUTPUT$ $LONGSERVICEOUTPUT$"
-       }
-
-define command{
-        command_name check_tcp_wrapper
-        command_line  $USER1$/check_wrapper.sh $USER1$/check_tcp -H $HOSTADDRESS$ -p $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name check_checkpoint_time
-        command_line $USER1$/check_wrapper.sh /var/lib/ambari-agent/ambari-python-wrap $USER1$/check_checkpoint_time.py -H "$ARG1$" -p $ARG2$ -w $ARG3$ -c $ARG4$ -t $ARG5$ -x $ARG6$
-       }
-
-define command{
-        command_name check_tcp_wrapper_sasl
-        command_line  $USER1$/check_wrapper.sh $USER1$/check_tcp -H $HOSTADDRESS$ -p $ARG1$ $ARG2$ -s \"$ARG3$\"
-       }
-
-define command{
-        command_name check_ambari
-        command_line $USER1$/check_wrapper.sh /var/lib/ambari-agent/ambari-python-wrap $USER1$/check_ambari_alerts.py -H $HOSTADDRESS$ -f $ARG1$ -n $ARG2$
-       }
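
Each define command above is parameterized through Nagios macros: $USER1$ is the plugin directory from resource.cfg, $HOSTADDRESS$ comes from the host being checked, and a service's check_command string splits on '!' so that the trailing fields become $ARG1$ through $ARGn$. An illustrative expansion:

    # Illustrative expansion of a check_command against a command_line template.
    def expand(command_line, user1, hostaddress, check_command):
        args = check_command.split('!')[1:]
        line = command_line.replace('$USER1$', user1).replace('$HOSTADDRESS$', hostaddress)
        for i, arg in enumerate(args):
            line = line.replace('$ARG%d$' % (i + 1), arg)
        return line

    print(expand('$USER1$/check_wrapper.sh $USER1$/check_tcp -H $HOSTADDRESS$ -p $ARG1$ $ARG2$',
                 '/usr/lib64/nagios/plugins', 'c6401.ambari.apache.org',
                 'check_tcp_wrapper!50070!-w 1 -c 1'))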

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
deleted file mode 100644
index 05c1252..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
+++ /dev/null
@@ -1,33 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for name, hosts in hostgroup_defs.iteritems() %}
-{% if hosts %}
-define hostgroup {
-        hostgroup_name  {{name}}
-        alias           {{name}}
-        members         {{','.join(hosts)}}
-}
-{% endif %}
-{% endfor %}
-
-define hostgroup {
-        hostgroup_name  all-servers
-        alias           All Servers
-        members         {{','.join(all_hosts)}}
-}
\ No newline at end of file
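
This template is the render-time counterpart of hostgroup_defs from params.py: the {% if hosts %} guard skips roles that resolved to None. Note iteritems(), which ties the template to a Python 2 Jinja2 environment; a self-contained sketch of the same loop on Python 3 (illustrative data):

    # Hedged sketch: the same loop under Python 3 Jinja2 (items() instead of iteritems()).
    from jinja2 import Template

    template = Template(
        "{% for name, hosts in hostgroup_defs.items() %}{% if hosts %}"
        "define hostgroup {\n"
        "        hostgroup_name  {{ name }}\n"
        "        members         {{ ','.join(hosts) }}\n"
        "}\n"
        "{% endif %}{% endfor %}")

    print(template.render(hostgroup_defs={'namenode': ['nn1'], 'hue-server': None}))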

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
deleted file mode 100644
index 8bcc980..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
+++ /dev/null
@@ -1,53 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-{% for host in all_hosts %}
-define host {
-        alias                     {{host}}
-        host_name                 {{host}}
-        use                       {{host_template}}
-        address                   {{host}}
-        check_command             check_tcp_wrapper!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
-        check_interval            0.25
-        retry_interval            0.25
-        max_check_attempts        4
-        notifications_enabled     1
-        first_notification_delay  0     # Send notification soon after change in the hard state
-        notification_interval     0     # Send the notification once
-        notification_options      d,u,r
-}
-
-{% endfor %}
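
The host template walks two index-aligned lists: all_ping_ports[loop.index-1] picks the Ambari agent ping port belonging to the current entry of all_hosts. Outside a template the same pairing is just zip (illustrative values):

    # Illustrative pairing of the parallel all_hosts / all_ping_ports lists.
    all_hosts = ['c6401.ambari.apache.org', 'c6402.ambari.apache.org']
    all_ping_ports = ['8670', '8670']

    for host, port in zip(all_hosts, all_ping_ports):
        print('define host { host_name %s; check_command check_tcp_wrapper!%s!-w 1 -c 1 }'
              % (host, port))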

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
deleted file mode 100644
index 00f0740..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
+++ /dev/null
@@ -1,119 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-
-{% if hostgroup_defs['namenode'] or 
-  hostgroup_defs['snamenode']  or
-  hostgroup_defs['slaves'] %}
-  {% if hostgroup_defs['namenode'] != None %}
-  define servicegroup {
-    servicegroup_name  HDFS
-    alias  HDFS Checks
-  }
-  {% endif %}
-{% endif %} 
-{%if hostgroup_defs['jobtracker'] or
-  hostgroup_defs['historyserver2']-%}
-define servicegroup {
-  servicegroup_name  MAPREDUCE
-  alias  MAPREDUCE Checks
-}
-{% endif %}
-{%if hostgroup_defs['resourcemanager'] or
-  hostgroup_defs['nodemanagers'] %}
-define servicegroup {
-  servicegroup_name  YARN
-  alias  YARN Checks
-}
-{% endif %}
-{%if hostgroup_defs['hbasemasters'] %}
-define servicegroup {
-  servicegroup_name  HBASE
-  alias  HBASE Checks
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-define servicegroup {
-  servicegroup_name  OOZIE
-  alias  OOZIE Checks
-}
-{% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-define servicegroup {
-  servicegroup_name  WEBHCAT
-  alias  WEBHCAT Checks
-}
-{% endif %}
-{% if hostgroup_defs['nagios-server'] %}
-define servicegroup {
-  servicegroup_name  NAGIOS
-  alias  NAGIOS Checks
-}
-{% endif %}
-{% if hostgroup_defs['ganglia-server'] %}
-define servicegroup {
-  servicegroup_name  GANGLIA
-  alias  GANGLIA Checks
-}
-{% endif %}
-{% if hostgroup_defs['hiveserver'] %}
-define servicegroup {
-  servicegroup_name  HIVE
-  alias  HIVE Checks
-}
-{% endif %}
-{% if hostgroup_defs['zookeeper-servers'] %}
-define servicegroup {
-  servicegroup_name  ZOOKEEPER
-  alias  ZOOKEEPER Checks
-}
-{% endif %}
-define servicegroup {
-  servicegroup_name  AMBARI
-  alias  AMBARI Checks
-}
-{% if hostgroup_defs['hue-server'] %}
-define servicegroup {
-  servicegroup_name  HUE
-  alias  HUE Checks
-}
-{% endif %}
-{% if hostgroup_defs['nimbus'] or
-  hostgroup_defs['drpc-server'] or
-  hostgroup_defs['storm_ui'] or
-  hostgroup_defs['supervisors'] or
-  hostgroup_defs['storm_rest_api']%}
-define servicegroup {
-  servicegroup_name  STORM
-  alias  STORM Checks
-}
-{% endif %}
-{% if hostgroup_defs['falcon-server'] %}
-define servicegroup {
-  servicegroup_name  FALCON
-  alias  FALCON Checks
-}
-{% endif %}
-
-{%if hostgroup_defs['flume-servers'] %}
-define servicegroup {
-  servicegroup_name  FLUME
-  alias  FLUME Checks
-}
-{% endif %}
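
Every servicegroup above except the unconditional AMBARI group is wrapped in a guard so it is only defined when at least one of the hostgroups feeding its checks resolved to actual hosts. Approximately, the gating reduces to the following (illustrative dependency table, not from the deleted template):

    # Illustrative gating: define a servicegroup only if any member hostgroup has hosts.
    SERVICEGROUP_DEPS = {
        'HDFS': ['namenode', 'snamenode', 'slaves'],
        'STORM': ['nimbus', 'drpc-server', 'storm_ui', 'supervisors', 'storm_rest_api'],
        'AMBARI': [],  # unconditional in the template above
    }

    def groups_to_define(hostgroup_defs):
        return sorted(sg for sg, deps in SERVICEGROUP_DEPS.items()
                      if not deps or any(hostgroup_defs.get(d) for d in deps))

    print(groups_to_define({'namenode': ['nn1'], 'nimbus': None}))  # ['AMBARI', 'HDFS']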

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4ededeb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-services.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-services.cfg.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-services.cfg.j2
deleted file mode 100644
index ac4bd47..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/NAGIOS/package/templates/hadoop-services.cfg.j2
+++ /dev/null
@@ -1,804 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-{# TODO: Look for { or } in created file #}
-# NAGIOS SERVER Check (status log update)
-{% if hostgroup_defs['nagios-server'] %}
-define service {
-        name                            hadoop-service
-        use                             generic-service
-        notification_options            w,u,c,r,f,s
-        first_notification_delay        0
-        notification_interval           0                 # Send the notification once
-        contact_groups                  admins
-        notifications_enabled           1
-        event_handler_enabled           1
-        register                        0
-}
-
-define service {        
-        hostgroup_name          nagios-server        
-        use                     hadoop-service
-        service_description     NAGIOS::Nagios status log freshness
-        servicegroups           NAGIOS
-        check_command           check_nagios!10!/var/nagios/status.dat!{{nagios_lookup_daemon_str}}
-        normal_check_interval   5
-        retry_check_interval    0.5
-        max_check_attempts      2
-}
-
-# NAGIOS SERVER HDFS Checks
-{% if hostgroup_defs['namenode'] != None %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes with space available
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode space"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{# used only for HDP2 #}
-{% if hostgroup_defs['namenode'] and hostgroup_defs['namenode'] != None and dfs_ha_enabled %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::NameNode HA Healthy
-        servicegroups           HDFS
-        check_command           check_namenodes_ha!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      5
-}
-{% endif %}
-
-# AMBARI AGENT Checks
-{% for hostname in all_hosts %}
-define service {
-        host_name	        {{ hostname }}
-        use                     hadoop-service
-        service_description     AMBARI::Ambari Agent process
-        servicegroups           AMBARI
-        check_command           check_tcp_wrapper!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% endfor %}
-
-# NAGIOS SERVER ZOOKEEPER Checks
-{% if hostgroup_defs['zookeeper-servers'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     ZOOKEEPER::Percent ZooKeeper Servers live
-        servicegroups           ZOOKEEPER
-        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process"!35%!70%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-# NAGIOS SERVER HBASE Checks
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASE::Percent RegionServers live
-        servicegroups           HBASE
-        check_command           check_aggregate!"REGIONSERVER::RegionServer process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-
-
-# GANGLIA SERVER Checks
-{% if hostgroup_defs['ganglia-server'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Server process
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% if hostgroup_defs['namenode'] %}
-define service {
-        hostgroup_name	        ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for NameNode
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_collector_namenode_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name	        ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HBase Master
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_collector_hbase_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['resourcemanager'] %}
-define service {
-        hostgroup_name	        ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for ResourceManager
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_collector_rm_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-define service {
-        hostgroup_name	        ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HistoryServer
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_collector_hs_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-{% endif %}
-
-
-{% if hostgroup_defs['snamenode'] and hostgroup_defs['namenode'] != None %}
-# Secondary namenode checks
-define service {
-        hostgroup_name          snamenode
-        use                     hadoop-service
-        service_description     NAMENODE::Secondary NameNode process
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{ snamenode_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['storm_ui'] %}
-# STORM UI Checks
-define service {
-        hostgroup_name          storm_ui
-        use                     hadoop-service
-        service_description     STORM_UI_SERVER::Storm UI on {{ hostgroup_defs['storm_ui'][0] }}
-        servicegroups           STORM
-        check_command           check_webui!storm_ui!{{ storm_ui_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['storm_ui'] %}
-# STORM UI Checks
-define service {
-        hostgroup_name          storm_ui
-        use                     hadoop-service
-        service_description     STORM_UI_SERVER::Storm UI Server process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ storm_ui_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['nimbus'] %}
-# Nimbus Checks
-define service {
-        hostgroup_name          nimbus
-        use                     hadoop-service
-        service_description     NIMBUS::Nimbus process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ nimbus_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['drpc-server'] %}
-# drpc Checks
-define service {
-        hostgroup_name          drpc-server
-        use                     hadoop-service
-        service_description     DRPC_SERVER::DRPC Server process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ drpc_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['storm_rest_api'] %}
-# Storm REST API Checks
-define service {
-        hostgroup_name          storm_rest_api
-        use                     hadoop-service
-        service_description     STORM_REST_API::Storm REST API Server process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ storm_rest_api_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-# NAGIOS SERVER Supervisor Checks
-{% if hostgroup_defs['supervisors'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     SUPERVISOR::Percent Supervisors live
-        servicegroups           STORM
-        check_command           check_aggregate!"SUPERVISOR::Supervisors process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          supervisors
-        use                     hadoop-service
-        service_description     SUPERVISOR::Supervisors process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ supervisor_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{%  endif %}
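
check_aggregate runs on the nagios-server host and rolls up every per-host
result whose service_description matches the quoted first argument; the two
percentages are the fractions of failed instances at which the aggregate turns
WARNING and CRITICAL (here 10% and 30% of supervisors down). A hypothetical
aggregate built on the same pattern, warning at a quarter and going critical
at half of the workers down:

    define service {
            hostgroup_name          nagios-server
            use                     hadoop-service
            service_description     EXAMPLE::Percent example workers live
            servicegroups           STORM
            check_command           check_aggregate!"EXAMPLE::Example worker process"!25%!50%
            normal_check_interval   0.5
            retry_check_interval    0.25
            max_check_attempts      3
    }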
-
-{% if hostgroup_defs['namenode'] and hostgroup_defs['namenode'] != None %}
-# HDFS Checks
-{%  for namenode_hostname in namenode_host %}
-{# TODO: check if we can get rid of str, lower #}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode edit logs directory status on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_name_dir_status!{{ namenode_port }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if check_cpu_on %}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode host CPU utilization on {{ namenode_hostname }}
-        servicegroups           HDFS
-#        check_command           check_cpu!200%!250%
-        check_command           check_cpu!{{ namenode_port }}!200%!250%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    2
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode Web UI on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_webui!namenode!{{ namenode_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode process on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{nn_ha_host_port_map[namenode_hostname]}}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     HDFS::NameNode RPC latency on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_rpcq_latency!NameNode!{{ namenode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      5
-}
-
-{%  endfor  %}
-
-define service {
-        host_name               {{namenode_host[0]}}
-        use                     hadoop-service
-        service_description     NAMENODE::Last checkpoint time
-        servicegroups           HDFS
-        check_command           check_checkpoint_time!{{ nn_hosts_string }}!{{ namenode_port }}!200!200!{{ dfs_namenode_checkpoint_period }}!{{dfs_namenode_checkpoint_txns}}
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Blocks health
-        servicegroups           HDFS
-        check_command           check_hdfs_blocks!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!{{ nn_metrics_property }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   2
-        retry_check_interval    1
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::HDFS capacity utilization
-        servicegroups           HDFS
-        check_command           check_hdfs_capacity!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!80%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   2
-        retry_check_interval    1
-        max_check_attempts      1
-}
-
-{% endif %}
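
The per-host process check above takes its port from nn_ha_host_port_map
rather than the shared namenode_port, so each NameNode in an HA pair is probed
on the RPC port it actually binds. Rendered against a hypothetical map entry
of nn1.example.com -> 8020, the loop body emits:

    define service {
            host_name               nn1.example.com
            use                     hadoop-service
            service_description     NAMENODE::NameNode process on nn1.example.com
            servicegroups           HDFS
            check_command           check_tcp_wrapper!8020!-w 1 -c 1
            normal_check_interval   0.5
            retry_check_interval    0.25
            max_check_attempts      3
    }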
-
-{% if hostgroup_defs['resourcemanager'] %}
-# YARN::RESOURCEMANAGER Checks 
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager Web UI
-        servicegroups           YARN
-        check_command           check_webui_ha!resourcemanager!{{ rm_hosts_in_str }}!{{ rm_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if check_cpu_on %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager CPU utilization
-        servicegroups           YARN
-#       check_command           check_cpu!200%!250%
-        check_command           check_cpu_ha!{{ rm_hosts_in_str }}!{{ rm_port }}!200%!250%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager RPC latency
-        servicegroups           YARN
-        check_command           check_rpcq_latency_ha!{{ rm_hosts_in_str }}!ResourceManager!{{ rm_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-{%  for rm_host in _rm_host  %}
-define service {
-        host_name               {{ rm_host }}
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager process on {{ rm_host }}
-        servicegroups           YARN
-        check_command           check_tcp_wrapper!{{ rm_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endfor %}
-{%  endif %}
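
The *_ha command variants receive rm_hosts_in_str, a single argument carrying
every ResourceManager host, so the probe can follow whichever node is
currently active instead of pinning itself to one host. Assuming a
comma-separated list and the stock 8088 web port, the rendered UI check would
read:

    check_command           check_webui_ha!resourcemanager!rm1.example.com,rm2.example.com!8088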
-
-{% if hostgroup_defs['nodemanagers'] %}
-# YARN::NODEMANAGER Checks
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager process
-        servicegroups           YARN
-        check_command           check_tcp_wrapper!{{ nm_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager health
-        servicegroups           YARN
-        check_command           check_nodemanager_health!{{ nm_port }}!{{ str(security_enabled).lower() }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     NODEMANAGER::Percent NodeManagers live
-        servicegroups           YARN
-        check_command           check_aggregate!"NODEMANAGER::NodeManager process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{%  endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-# MAPREDUCE::JOBHISTORY Checks
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!historyserver2!{{ hs_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if check_cpu_on %}
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer CPU utilization
-        servicegroups           MAPREDUCE
-#        check_command           check_cpu!200%!250%
-        check_command           check_cpu!{{ hs_port }}!200%!250%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{%  endif %}
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer RPC latency
-        servicegroups           MAPREDUCE
-        check_command           check_rpcq_latency!JobHistoryServer!{{ hs_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp_wrapper!{{ hs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{%  endif %}
-
-{% if hostgroup_defs['journalnodes'] %}
-# Journalnode checks
-define service {
-        hostgroup_name          journalnodes
-        use                     hadoop-service
-        service_description     JOURNALNODE::JournalNode process
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{ journalnode_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if dfs_ha_enabled %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent JournalNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"JOURNALNODE::JournalNode process"!33%!50%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
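
The 33%/50% thresholds mirror the quorum rule: an HA NameNode can keep
writing edits only while a majority of JournalNodes are up. For the usual
ensemble of three:

    # 1 of 3 down = 33%  -> WARNING  (redundancy reduced, quorum intact)
    # 2 of 3 down = 66%  -> CRITICAL (majority lost, the namespace stops accepting edits)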
-
-{% if hostgroup_defs['slaves'] and hostgroup_defs['namenode'] != None %}
-# HDFS::DATANODE Checks
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode process
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{datanode_port}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode space
-        servicegroups           HDFS
-        check_command           check_datanode_storage!{{ datanode_port }}!90%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   2 
-        retry_check_interval    1
-        max_check_attempts      2
-}
-
-{% endif %}
-
-{% if hostgroup_defs['zookeeper-servers'] %}
-# ZOOKEEPER Checks
-define service {
-        hostgroup_name          zookeeper-servers
-        use                     hadoop-service
-        service_description     ZOOKEEPER::ZooKeeper Server process
-        servicegroups           ZOOKEEPER
-        check_command           check_tcp_wrapper!{{ clientPort }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] and hostgroup_defs['region-servers'] != None %}
-# HBASE::REGIONSERVER Checks
-define service {
-        hostgroup_name          region-servers
-        use                     hadoop-service
-        service_description     REGIONSERVER::RegionServer process
-        servicegroups           HBASE
-        check_command           check_tcp_wrapper!{{ hbase_rs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{# HBASE::MASTER Checks
-# define service {
-#         hostgroup_name          hbasemasters
-#         use                     hadoop-service
-#         service_description     HBASEMASTER::HBase Master Web UI
-#         servicegroups           HBASE
-#         check_command           check_webui!hbase!{{ hbase_master_port }}
-#         normal_check_interval   1
-#         retry_check_interval    1
-#         max_check_attempts      3
-# }
-#}
-{% if hostgroup_defs['hbasemasters'] %}
-{% if check_cpu_on %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master CPU utilization
-        servicegroups           HBASE
-#        check_command           check_cpu!200%!250%
-        check_command           check_cpu_ha!{{ hbase_master_hosts_in_str }}!{{ hbase_master_port }}!200%!250%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    2
-        max_check_attempts      5
-}
-{%  endif %}
-{%  endif %}
-
-{%  for hbasemaster in hbase_master_hosts  %}
-define service {
-        host_name               {{ hbasemaster }}
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master process on {{ hbasemaster }}
-        servicegroups           HBASE
-        check_command           check_tcp_wrapper!{{ hbase_master_rpc_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endfor %}
-{% endif %}
-
-{% if hostgroup_defs['hiveserver'] %}
-# HIVE Metastore check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-METASTORE::Hive Metastore process
-        servicegroups           HIVE
-        check_command           check_tcp_wrapper!{{ hive_metastore_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-# HIVE Server check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-SERVER::HiveServer2 process
-        servicegroups           HIVE
-        check_command           check_tcp_wrapper_sasl!{{ hive_server_port }}!-w 1 -c 1!A001 AUTHENTICATE ANONYMOUS
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
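
A bare connect is not enough for HiveServer2, whose Thrift endpoint expects a
SASL negotiation, so check_tcp_wrapper_sasl pushes the handshake string
("A001 AUTHENTICATE ANONYMOUS") down the socket and treats an accepted
exchange as success. A sketch of such a command, assuming it hands the string
to check_tcp's --send option:

    define command {
            command_name    check_tcp_wrapper_sasl
            command_line    $USER1$/check_tcp -H $HOSTADDRESS$ -p $ARG1$ $ARG2$ -s "$ARG3$"
    }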
-{% if hostgroup_defs['oozie-server'] %}
-# Oozie check
-define service {
-        hostgroup_name          oozie-server
-        use                     hadoop-service
-        service_description     OOZIE::Oozie Server status
-        servicegroups           OOZIE
-        {% if security_enabled %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!false
-        {% endif %}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% endif %}
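
Only the Kerberos branch passes the keytab, principal, and kinit path, which
the plugin needs to authenticate before it can query Oozie's status API; the
insecure branch stops after the boolean. With hypothetical values filled in,
the two branches render as:

    check_command           check_oozie_status!11000!/usr/jdk64/jdk1.7.0_45!true!/etc/security/keytabs/nagios.service.keytab!nagios@EXAMPLE.COM!/usr/bin/kinit
    check_command           check_oozie_status!11000!/usr/jdk64/jdk1.7.0_45!false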
-{% if hostgroup_defs['webhcat-server'] %}
-# WEBHCAT check
-define service {
-        hostgroup_name          webhcat-server
-        use                     hadoop-service
-        service_description     WEBHCAT::WebHCat Server status
-        servicegroups           WEBHCAT 
-        {% if security_enabled %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!false
-        {% endif %}
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hue-server'] %}
-define service {
-        hostgroup_name          hue-server
-        use                     hadoop-service
-        service_description     HUE::Hue Server status
-        servicegroups           HUE
-        check_command           check_hue_status
-        normal_check_interval   100
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-#FALCON checks
-{% if hostgroup_defs['falcon-server'] %}
-define service {
-        hostgroup_name          falcon-server
-        use                     hadoop-service
-        service_description     FALCON::Falcon Server process
-        servicegroups           FALCON
-        check_command           check_tcp_wrapper!{{ falcon_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-define service {
-        hostgroup_name          falcon-server
-        use                     hadoop-service
-        service_description     FALCON::Falcon Server Web UI
-        servicegroups           FALCON
-        check_command           check_webui!falconserver!{{ falcon_port }}
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['ats-servers'] %}
-define service {
-        hostgroup_name          ats-servers
-        use                     hadoop-service
-        service_description     APP_TIMELINE_SERVER::App Timeline Server process
-        servicegroups           YARN
-        check_command           check_tcp_wrapper!{{ ahs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['flume-servers'] %}
-# FLUME Checks
-define service {
-        hostgroup_name          flume-servers
-        use                     hadoop-service
-        service_description     FLUME::Flume Agent process
-        servicegroups           FLUME
-        check_command           check_ambari!/var/nagios/ambari.json!flume_agent
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
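
Flume exposes no single well-known port to probe, so check_ambari instead
consults a status file maintained on the Nagios host (/var/nagios/ambari.json)
and looks up the flume_agent entry. The file's schema is not part of this
patch; a purely illustrative shape:

    # /var/nagios/ambari.json (field names hypothetical):
    #   { "flume_agent": { "status": "RUNNING" } }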
-