Posted to commits@ambari.apache.org by ds...@apache.org on 2014/05/14 16:02:09 UTC

[01/15] AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)

Repository: ambari
Updated Branches:
  refs/heads/trunk d56db548d -> 3d1171b06


http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index e4b5fc5..882f1e3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -253,7 +253,6 @@ public class ExecutionCommand extends AgentCommand {
    */
   public static interface KeyNames {
 
-    String SCHEMA_VERSION = "schema_version";
     String COMMAND_TIMEOUT = "command_timeout";
     String SCRIPT = "script";
     String SCRIPT_TYPE = "script_type";

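With SCHEMA_VERSION gone, commandParams carries only the remaining KeyNames. A minimal, illustrative Java sketch of the resulting map (not code from this patch; the values mirror the test fixtures further below):

  import java.util.Map;
  import java.util.TreeMap;

  // Sketch only: mirrors how the helper classes in this patch now build commandParams.
  public class CommandParamsSketch {
      public static void main(String[] args) {
          Map<String, String> commandParams = new TreeMap<String, String>();
          commandParams.put("command_timeout", "600");         // KeyNames.COMMAND_TIMEOUT
          commandParams.put("script", "scripts/datanode.py");  // KeyNames.SCRIPT
          commandParams.put("script_type", "PYTHON");          // KeyNames.SCRIPT_TYPE
          // No "schema_version" entry any more: the agent neither receives nor branches on it.
          System.out.println(commandParams);
      }
  }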
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
index 5eb7ead..2dddd20 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
@@ -250,7 +250,6 @@ public class HeartbeatMonitor implements Runnable {
 
     // Fill command params
     Map<String, String> commandParams = statusCmd.getCommandParams();
-    commandParams.put(SCHEMA_VERSION, serviceInfo.getSchemaVersion());
 
     String commandTimeout = configuration.getDefaultAgentTaskTimeout();
     CommandScriptDefinition script = componentInfo.getCommandScript();

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 0c7aa3e..bfcf3a8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -51,7 +51,6 @@ import java.util.TreeMap;
 
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_TIMEOUT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMPONENT_CATEGORY;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCHEMA_VERSION;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TYPE;
 
@@ -316,7 +315,6 @@ public class AmbariActionExecutionHelper {
       commandParams.put(COMMAND_TIMEOUT, actionContext.getTimeout().toString());
       commandParams.put(SCRIPT, actionName + ".py");
       commandParams.put(SCRIPT_TYPE, TYPE_PYTHON);
-      commandParams.put(SCHEMA_VERSION, AmbariMetaInfo.SCHEMA_VERSION_2);
 
       ExecutionCommand execCmd = stage.getExecutionCommandWrapper(hostName,
         actionContext.getActionName()).getExecutionCommand();

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 8294895..b27698d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -70,7 +70,6 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMPONENT
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CUSTOM_COMMAND;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.REPO_INFO;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCHEMA_VERSION;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TYPE;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
@@ -268,7 +267,6 @@ public class AmbariCustomCommandExecutionHelper {
       execCmd.setHostLevelParams(hostLevelParams);
 
       Map<String, String> commandParams = new TreeMap<String, String>();
-      commandParams.put(SCHEMA_VERSION, serviceInfo.getSchemaVersion());
       if (additionalCommandParams != null) {
         for (String key : additionalCommandParams.keySet()) {
           commandParams.put(key, additionalCommandParams.get(key));
@@ -443,7 +441,6 @@ public class AmbariCustomCommandExecutionHelper {
     execCmd.setHostLevelParams(hostLevelParams);
 
     Map<String, String> commandParams = new TreeMap<String, String>();
-    commandParams.put(SCHEMA_VERSION, serviceInfo.getSchemaVersion());
 
     String commandTimeout = configs.getDefaultAgentTaskTimeout();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index bdfd436..b2151f5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -72,7 +72,6 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDB
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.PACKAGE_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.REPO_INFO;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCHEMA_VERSION;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TYPE;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
@@ -1281,7 +1280,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     if (commandParams == null) { // if not defined
       commandParams = new TreeMap<String, String>();
     }
-    commandParams.put(SCHEMA_VERSION, serviceInfo.getSchemaVersion());
     String commandTimeout = configs.getDefaultAgentTaskTimeout();
     /*
      * This script is only used for

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java
index 56e7438..e35619f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java
@@ -58,9 +58,7 @@ public class CommandScriptDefinition {
   }
 
   public static enum Type {
-    PYTHON,
-
-    PUPPET // TODO: Not supported yet. Do we really need it?
+    PYTHON
   }
 
   @Override

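After this hunk the enum has a single member, so a script type other than PYTHON no longer resolves. An illustrative sketch of that behavior (assumed standard enum semantics, not code from this patch):

  // Sketch only: Type now contains PYTHON alone, as in the hunk above.
  public class ScriptTypeSketch {
      enum Type { PYTHON }
      public static void main(String[] args) {
          System.out.println(Type.valueOf("PYTHON")); // resolves fine
          // Type.valueOf("PUPPET") would now throw IllegalArgumentException.
      }
  }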
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json b/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json
index 674b37a..ce92689 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json
@@ -26,8 +26,7 @@
     "commandParams": {
         "command_timeout": "600", 
         "service_package_folder": "HDFS",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
+        "script_type": "PYTHON",
         "script": "scripts/datanode.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/default.json b/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
index a5d38f6..8eabd30 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
@@ -26,8 +26,7 @@
     "commandParams": {
         "command_timeout": "600", 
         "service_package_folder": "HDFS",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
+        "script_type": "PYTHON",
         "script": "scripts/datanode.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json b/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json
index 43f7045..d9b928d 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json
@@ -26,8 +26,7 @@
     "commandParams": {
         "command_timeout": "600", 
         "service_package_folder": "HDFS",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
+        "script_type": "PYTHON",
         "script": "scripts/datanode.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json b/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
index c4a8498..fe3d6f6 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
@@ -26,8 +26,7 @@
     "commandParams": {
         "command_timeout": "600", 
         "service_package_folder": "HIVE",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
+        "script_type": "PYTHON",
         "script": "scripts/mysql_server.py",
         "excluded_hosts": "host1",
         "mark_draining_only" : "false",

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/1.3.2/configs/secured_no_jce_name.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/secured_no_jce_name.json b/ambari-server/src/test/python/stacks/1.3.2/configs/secured_no_jce_name.json
index 04ca9ce..105d0b7 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/secured_no_jce_name.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/secured_no_jce_name.json
@@ -25,8 +25,7 @@
     "commandParams": {
         "command_timeout": "600", 
         "service_package_folder": "HIVE",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
+        "script_type": "PYTHON",
         "script": "scripts/mysql_server.py",
         "excluded_hosts": "host1",
         "mark_draining_only" : "false",

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
index c45918d..4817a58 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
@@ -22,8 +22,7 @@
     "commandParams": {
         "command_timeout": "300", 
         "service_package_folder": "OOZIE",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
+        "script_type": "PYTHON",
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1",
         "mark_draining_only": "true"

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index efbafa5..3f5220e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -22,8 +22,7 @@
     "commandParams": {
         "command_timeout": "300", 
         "service_package_folder": "OOZIE",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
+        "script_type": "PYTHON",
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false"

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
index 0a5ccae..b30714c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
@@ -22,8 +22,7 @@
     "commandParams": {
         "command_timeout": "300", 
         "service_package_folder": "OOZIE",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
+        "script_type": "PYTHON",
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false"

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json
index cb81436..931f202 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json
@@ -22,8 +22,7 @@
     "commandParams": {
         "command_timeout": "300", 
         "service_package_folder": "OOZIE",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
+        "script_type": "PYTHON",
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
index f152ed7..0ea0db3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
@@ -27,8 +27,7 @@
     "commandParams": {
         "service_package_folder": "HDP/2.0.6/services/YARN/package", 
         "script": "scripts/nodemanager.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "schema_version": "2.0", 
+        "hooks_folder": "HDP/2.0.6/hooks",
         "command_timeout": "600", 
         "script_type": "PYTHON"
     }, 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
index 6f60980..dce20fa 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
@@ -27,8 +27,7 @@
     "commandParams": {
         "service_package_folder": "HDP/2.0.6/services/HDFS/package", 
         "script": "scripts/zkfc_slave.py", 
-        "hooks_folder": "HDP/2.0.6/hooks", 
-        "schema_version": "2.0", 
+        "hooks_folder": "HDP/2.0.6/hooks",
         "command_timeout": "600", 
         "script_type": "PYTHON"
     }, 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index b3e77fd..12de853 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -26,8 +26,7 @@
     "commandParams": {
         "command_timeout": "600", 
         "service_package_folder": "YARN",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
+        "script_type": "PYTHON",
         "script": "scripts/yarn_client.py",
         "excluded_hosts": "host1",
         "mark_draining_only" : "false"

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json
index f403cde..ecf5031 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json
@@ -25,8 +25,7 @@
     "commandParams": {
         "command_timeout": "600", 
         "service_package_folder": "YARN",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
+        "script_type": "PYTHON",
         "script": "scripts/yarn_client.py",
         "excluded_hosts": "host1",
         "mark_draining_only" : "false"

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/2.1/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default.json b/ambari-server/src/test/python/stacks/2.1/configs/default.json
index d80c28d..82e1013 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default.json
@@ -22,8 +22,7 @@
     "commandParams": {
         "command_timeout": "300", 
         "service_package_folder": "OOZIE",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
+        "script_type": "PYTHON",
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2"
     },

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-server/src/test/python/stacks/2.1/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured.json b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
index 68b04d0..774cd2f 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
@@ -27,8 +27,7 @@
     "commandParams": {
         "command_timeout": "600", 
         "service_package_folder": "YARN",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
+        "script_type": "PYTHON",
         "script": "scripts/yarn_client.py",
         "excluded_hosts": "host1"
     }, 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/docs/src/site/apt/install-0.9.apt
----------------------------------------------------------------------
diff --git a/docs/src/site/apt/install-0.9.apt b/docs/src/site/apt/install-0.9.apt
index d11878e..25c14e0 100644
--- a/docs/src/site/apt/install-0.9.apt
+++ b/docs/src/site/apt/install-0.9.apt
@@ -62,7 +62,7 @@ Installing Ambari
   2) Install ambari-agent on each of the cluster nodes.
 
     * To install ambari-agent, point to the freely accessible HDP repo
-that contains the required dependencies (e.g., Puppet).  Then use yum to install ambari-agent RPM and all of its dependencies.
+that contains the required dependencies.  Then use yum to install ambari-agent RPM and all of its dependencies.
 
     On RHEL/CentOS 5:
 
@@ -90,7 +90,7 @@ $ sudo service iptables stop
 
   4) Install Ambari server on Ambari master:
 
-    * To install Ambari server, point to the freely accessible HDP repo that contains the required dependencies (e.g., Puppet, PHP, Ruby, etc).  Then use yum to install Ambari RPMs and all of the dependencies.
+    * To install Ambari server, point to the freely accessible HDP repo that contains the required dependencies (e.g., PHP, etc).  Then use yum to install Ambari RPMs and all of the dependencies.
 
     On RHEL/CentOS 5:
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/docs/src/site/apt/install.apt
----------------------------------------------------------------------
diff --git a/docs/src/site/apt/install.apt b/docs/src/site/apt/install.apt
index b48bb2b..e33c873 100644
--- a/docs/src/site/apt/install.apt
+++ b/docs/src/site/apt/install.apt
@@ -62,7 +62,7 @@ Installing Ambari
   2) Install ambari-agent on each of the cluster nodes.
 
     * To install ambari-agent, point to the freely accessible HDP repo
-that contains the required dependencies (e.g., Puppet).  Then use yum to install ambari-agent RPM and all of its dependencies.
+that contains the required dependencies.  Then use yum to install ambari-agent RPM and all of its dependencies.
 
     On RHEL/CentOS 5:
 
@@ -90,7 +90,7 @@ $ sudo service iptables stop
 
   4) Install Ambari server on Ambari master:
 
-    * To install Ambari server, point to the freely accessible HDP repo that contains the required dependencies (e.g., Puppet, PHP, Ruby, etc).  Then use yum to install Ambari RPMs and all of the dependencies.
+    * To install Ambari server, point to the freely accessible HDP repo that contains the required dependencies (e.g., PHP, etc).  Then use yum to install Ambari RPMs and all of the dependencies.
 
     On RHEL/CentOS 5:
 


[15/15] git commit: AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)

Posted by ds...@apache.org.
AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3d1171b0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3d1171b0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3d1171b0

Branch: refs/heads/trunk
Commit: 3d1171b06ae77276ce1cb6e6553eb339840d799a
Parents: d56db54
Author: Dmitry Sen <ds...@hortonworks.com>
Authored: Wed May 14 17:00:48 2014 +0300
Committer: Dmitry Sen <ds...@hortonworks.com>
Committed: Wed May 14 17:00:50 2014 +0300

----------------------------------------------------------------------
 LICENSE.txt                                     |    7 -
 ambari-agent/conf/unix/ambari-agent.ini         |    6 -
 ambari-agent/pom.xml                            |   97 +-
 .../src/main/package/deb/control/postinst       |    3 -
 .../src/main/package/rpm/postinstall.sh         |    1 -
 .../src/main/puppet/manifestloader/site.pp      |   48 -
 .../configgenerator/manifests/configfile.pp     |   68 -
 .../modules/configgenerator/manifests/init.pp   |   23 -
 .../manifests/dashboard/service_check.pp        |   21 -
 .../modules/hdp-dashboard/manifests/init.pp     |   76 -
 .../modules/hdp-dashboard/manifests/params.pp   |   28 -
 .../templates/cluster_configuration.json.erb    |   97 --
 .../modules/hdp-flume/files/flumeSmoke.sh       |   23 -
 .../modules/hdp-flume/manifests/client.pp       |   25 -
 .../puppet/modules/hdp-flume/manifests/init.pp  |   27 -
 .../modules/hdp-flume/manifests/params.pp       |   24 -
 .../modules/hdp-flume/manifests/service.pp      |   28 -
 .../hdp-flume/templates/flume-env.sh.erb        |   24 -
 .../hdp-flume/templates/log4j.properties.erb    |   59 -
 .../modules/hdp-ganglia/files/checkGmetad.sh    |   37 -
 .../modules/hdp-ganglia/files/checkGmond.sh     |   62 -
 .../modules/hdp-ganglia/files/checkRrdcached.sh |   34 -
 .../modules/hdp-ganglia/files/gmetad.init       |   73 -
 .../modules/hdp-ganglia/files/gmetadLib.sh      |  204 ---
 .../puppet/modules/hdp-ganglia/files/gmond.init |   73 -
 .../modules/hdp-ganglia/files/gmondLib.sh       |  556 --------
 .../puppet/modules/hdp-ganglia/files/rrd.py     |  207 ---
 .../modules/hdp-ganglia/files/rrdcachedLib.sh   |   47 -
 .../modules/hdp-ganglia/files/setupGanglia.sh   |  141 --
 .../modules/hdp-ganglia/files/startGmetad.sh    |   64 -
 .../modules/hdp-ganglia/files/startGmond.sh     |   80 --
 .../modules/hdp-ganglia/files/startRrdcached.sh |   69 -
 .../modules/hdp-ganglia/files/stopGmetad.sh     |   43 -
 .../modules/hdp-ganglia/files/stopGmond.sh      |   55 -
 .../modules/hdp-ganglia/files/stopRrdcached.sh  |   41 -
 .../hdp-ganglia/files/teardownGanglia.sh        |   28 -
 .../modules/hdp-ganglia/manifests/config.pp     |   79 -
 .../manifests/config/generate_daemon.pp         |   43 -
 .../manifests/hdp-gmetad/service_check.pp       |   36 -
 .../manifests/hdp-gmond/service_check.pp        |   36 -
 .../modules/hdp-ganglia/manifests/init.pp       |   53 -
 .../modules/hdp-ganglia/manifests/monitor.pp    |  165 ---
 .../hdp-ganglia/manifests/monitor_and_server.pp |   79 -
 .../modules/hdp-ganglia/manifests/params.pp     |   35 -
 .../modules/hdp-ganglia/manifests/server.pp     |  259 ----
 .../templates/gangliaClusters.conf.erb          |   43 -
 .../hdp-ganglia/templates/gangliaEnv.sh.erb     |   24 -
 .../hdp-ganglia/templates/gangliaLib.sh.erb     |   62 -
 .../modules/hdp-hadoop/files/checkForFormat.sh  |   62 -
 .../modules/hdp-hadoop/files/checkWebUI.py      |   53 -
 .../hdp-hadoop/files/task-log4j.properties      |  132 --
 .../parser/functions/hdp_hadoop_get_mode.rb     |   65 -
 .../functions/hdp_hadoop_get_namenode_id.rb     |   47 -
 .../parser/functions/hdp_hadoop_get_owner.rb    |   51 -
 .../modules/hdp-hadoop/manifests/client.pp      |   56 -
 .../modules/hdp-hadoop/manifests/datanode.pp    |  100 --
 .../modules/hdp-hadoop/manifests/glusterfs.pp   |   36 -
 .../manifests/glusterfs_service_check.pp        |   26 -
 .../hdp-hadoop/manifests/hdfs/copyfromlocal.pp  |   84 --
 .../hdp-hadoop/manifests/hdfs/decommission.pp   |   49 -
 .../hdp-hadoop/manifests/hdfs/directory.pp      |  121 --
 .../manifests/hdfs/generate_exclude_file.pp     |   42 -
 .../hdp-hadoop/manifests/hdfs/service_check.pp  |  170 ---
 .../puppet/modules/hdp-hadoop/manifests/init.pp |  547 -------
 .../modules/hdp-hadoop/manifests/jobtracker.pp  |   96 --
 .../manifests/jobtracker/service_check.pp       |   29 -
 .../modules/hdp-hadoop/manifests/journalnode.pp |   60 -
 .../manifests/mapred/service_check.pp           |   75 -
 .../modules/hdp-hadoop/manifests/namenode.pp    |  285 ----
 .../hdp-hadoop/manifests/namenode/format.pp     |   61 -
 .../manifests/namenode/service_check.pp         |   28 -
 .../modules/hdp-hadoop/manifests/package.pp     |   44 -
 .../modules/hdp-hadoop/manifests/params.pp      |  222 ---
 .../modules/hdp-hadoop/manifests/service.pp     |  132 --
 .../manifests/slave/jobtracker-conn.pp          |   24 -
 .../hdp-hadoop/manifests/slave/master-conn.pp   |   27 -
 .../hdp-hadoop/manifests/slave/namenode-conn.pp |   27 -
 .../modules/hdp-hadoop/manifests/smoketest.pp   |   46 -
 .../modules/hdp-hadoop/manifests/snamenode.pp   |   98 --
 .../modules/hdp-hadoop/manifests/tasktracker.pp |   94 --
 .../puppet/modules/hdp-hadoop/manifests/zkfc.pp |   51 -
 .../templates/commons-logging.properties.erb    |   25 -
 .../hdp-hadoop/templates/exclude_hosts_list.erb |    3 -
 .../hdp-hadoop/templates/hadoop-env.sh.erb      |  122 --
 .../hadoop-metrics2.properties-GANGLIA.erb      |   45 -
 .../templates/hadoop-metrics2.properties.erb    |   45 -
 .../modules/hdp-hadoop/templates/hdfs.conf.erb  |   17 -
 .../hdp-hadoop/templates/health_check-v2.erb    |   91 --
 .../hdp-hadoop/templates/health_check.erb       |  118 --
 .../hdp-hadoop/templates/include_hosts_list.erb |    3 -
 .../hdp-hadoop/templates/log4j.properties.erb   |  227 ---
 .../modules/hdp-hadoop/templates/slaves.erb     |    3 -
 .../hdp-hadoop/templates/taskcontroller.cfg.erb |   20 -
 .../modules/hdp-hbase/files/hbaseSmoke.sh       |   26 -
 .../modules/hdp-hbase/files/hbaseSmokeVerify.sh |   32 -
 .../modules/hdp-hbase/manifests/client.pp       |   51 -
 .../hdp-hbase/manifests/hbase/service_check.pp  |  113 --
 .../puppet/modules/hdp-hbase/manifests/init.pp  |  155 --
 .../modules/hdp-hbase/manifests/master-conn.pp  |   24 -
 .../modules/hdp-hbase/manifests/master.pp       |   66 -
 .../modules/hdp-hbase/manifests/params.pp       |  110 --
 .../modules/hdp-hbase/manifests/regionserver.pp |   73 -
 .../modules/hdp-hbase/manifests/service.pp      |   82 --
 .../modules/hdp-hbase/manifests/zk-conn.pp      |   26 -
 ...hadoop-metrics.properties-GANGLIA-MASTER.erb |   50 -
 .../hadoop-metrics.properties-GANGLIA-RS.erb    |   50 -
 .../templates/hadoop-metrics.properties.erb     |   50 -
 ...metrics2-hbase.properties-GANGLIA-MASTER.erb |   62 -
 ...oop-metrics2-hbase.properties-GANGLIA-RS.erb |   62 -
 .../hdp-hbase/templates/hbase-env.sh.erb        |   83 --
 .../hdp-hbase/templates/hbase-smoke.sh.erb      |   26 -
 .../templates/hbase_client_jaas.conf.erb        |    5 -
 .../templates/hbase_grant_permissions.erb       |   21 -
 .../templates/hbase_master_jaas.conf.erb        |    8 -
 .../templates/hbase_regionserver_jaas.conf.erb  |    8 -
 .../hdp-hbase/templates/regionservers.erb       |    3 -
 .../puppet/modules/hdp-hcat/files/hcatSmoke.sh  |   35 -
 .../puppet/modules/hdp-hcat/files/pigSmoke.sh   |   18 -
 .../hdp-hcat/manifests/hcat/service_check.pp    |   73 -
 .../puppet/modules/hdp-hcat/manifests/init.pp   |   88 --
 .../puppet/modules/hdp-hcat/manifests/params.pp |   36 -
 .../modules/hdp-hcat/templates/hcat-env.sh.erb  |   25 -
 .../puppet/modules/hdp-hive/files/hiveSmoke.sh  |   23 -
 .../modules/hdp-hive/files/hiveserver2.sql      |   23 -
 .../modules/hdp-hive/files/hiveserver2Smoke.sh  |   31 -
 .../modules/hdp-hive/files/startHiveserver2.sh  |   22 -
 .../modules/hdp-hive/files/startMetastore.sh    |   22 -
 .../puppet/modules/hdp-hive/manifests/client.pp |   40 -
 .../hdp-hive/manifests/hive/service_check.pp    |   88 --
 .../puppet/modules/hdp-hive/manifests/init.pp   |  145 --
 .../hdp-hive/manifests/jdbc-connector.pp        |   63 -
 .../modules/hdp-hive/manifests/metastore.pp     |   61 -
 .../puppet/modules/hdp-hive/manifests/params.pp |   86 --
 .../puppet/modules/hdp-hive/manifests/server.pp |   61 -
 .../modules/hdp-hive/manifests/service.pp       |  129 --
 .../modules/hdp-hive/templates/hive-env.sh.erb  |   55 -
 .../hdp-hue/manifests/hue/service_check.pp      |   47 -
 .../puppet/modules/hdp-hue/manifests/init.pp    |   83 --
 .../puppet/modules/hdp-hue/manifests/params.pp  |  108 --
 .../puppet/modules/hdp-hue/manifests/server.pp  |   46 -
 .../puppet/modules/hdp-hue/manifests/service.pp |   76 -
 .../modules/hdp-hue/templates/hue-ini.cfg.erb   |  496 -------
 .../hdp-kerberos/lib/facter/kadm_keytab.rb      |   21 -
 .../parser/functions/kerberos_keytabs_input.rb  |   34 -
 .../hdp-kerberos/manifests/adminclient.pp       |  140 --
 .../hdp-kerberos/manifests/bigtop/init.pp       |  217 ---
 .../modules/hdp-kerberos/manifests/client.pp    |   50 -
 .../modules/hdp-kerberos/manifests/init.pp      |   25 -
 .../modules/hdp-kerberos/manifests/params.pp    |   70 -
 .../modules/hdp-kerberos/manifests/server.pp    |  116 --
 .../modules/hdp-kerberos/templates/kadm5.acl    |   21 -
 .../modules/hdp-kerberos/templates/kdc.conf     |   36 -
 .../modules/hdp-kerberos/templates/krb5.conf    |   47 -
 .../puppet/modules/hdp-kerberos/tests/init.pp   |   31 -
 .../hdp-monitor-webserver/manifests/init.pp     |  123 --
 .../modules/hdp-mysql/files/addMysqlUser.sh     |   41 -
 .../puppet/modules/hdp-mysql/manifests/init.pp  |   22 -
 .../modules/hdp-mysql/manifests/params.pp       |   28 -
 .../modules/hdp-mysql/manifests/server.pp       |  141 --
 .../hdp-nagios/files/check_aggregate.php        |  243 ----
 .../modules/hdp-nagios/files/check_cpu.pl       |  114 --
 .../hdp-nagios/files/check_datanode_storage.php |  100 --
 .../hdp-nagios/files/check_hdfs_blocks.php      |  115 --
 .../hdp-nagios/files/check_hdfs_capacity.php    |  109 --
 .../files/check_hive_metastore_status.sh        |   45 -
 .../hdp-nagios/files/check_hue_status.sh        |   31 -
 .../files/check_mapred_local_dir_used.sh        |   34 -
 .../hdp-nagios/files/check_name_dir_status.php  |   93 --
 .../hdp-nagios/files/check_namenodes_ha.sh      |   82 --
 .../files/check_nodemanager_health.sh           |   44 -
 .../hdp-nagios/files/check_oozie_status.sh      |   45 -
 .../hdp-nagios/files/check_rpcq_latency.php     |  104 --
 .../hdp-nagios/files/check_templeton_status.sh  |   45 -
 .../modules/hdp-nagios/files/check_webui.sh     |   87 --
 .../hdp-nagios/files/hdp_nagios_init.php        |   81 --
 .../parser/functions/hdp_nagios_all_hosts.rb    |   35 -
 .../hdp_nagios_compute_target_hosts.rb          |   42 -
 .../functions/hdp_nagios_members_exist.rb       |   34 -
 .../parser/functions/hdp_nagios_target_hosts.rb |   28 -
 .../puppet/modules/hdp-nagios/manifests/init.pp |   21 -
 .../manifests/nagios/service_check.pp           |   36 -
 .../modules/hdp-nagios/manifests/params.pp      |   98 --
 .../modules/hdp-nagios/manifests/server.pp      |  298 ----
 .../hdp-nagios/manifests/server/config.pp       |   86 --
 .../hdp-nagios/manifests/server/packages.pp     |  110 --
 .../modules/hdp-nagios/manifests/target.pp      |   21 -
 .../hdp-nagios/templates/contacts.cfg.erb       |   91 --
 .../templates/hadoop-commands.cfg.erb           |  114 --
 .../templates/hadoop-hostgroups.cfg.erb         |   20 -
 .../hdp-nagios/templates/hadoop-hosts.cfg.erb   |   16 -
 .../templates/hadoop-servicegroups.cfg.erb      |   80 --
 .../templates/hadoop-services.cfg.erb           |  753 ----------
 .../modules/hdp-nagios/templates/nagios.cfg.erb | 1349 ------------------
 .../hdp-nagios/templates/nagios.conf.erb        |   62 -
 .../modules/hdp-nagios/templates/nagios.erb     |  146 --
 .../hdp-nagios/templates/resource.cfg.erb       |   33 -
 .../modules/hdp-oozie/files/oozieSmoke.sh       |   93 --
 .../modules/hdp-oozie/files/oozieSmoke2.sh      |   95 --
 .../modules/hdp-oozie/files/wrap_ooziedb.sh     |   31 -
 .../modules/hdp-oozie/manifests/client.pp       |   40 -
 .../hdp-oozie/manifests/download-ext-zip.pp     |   31 -
 .../puppet/modules/hdp-oozie/manifests/init.pp  |  150 --
 .../hdp-oozie/manifests/oozie/service_check.pp  |   73 -
 .../modules/hdp-oozie/manifests/params.pp       |   65 -
 .../modules/hdp-oozie/manifests/server.pp       |   72 -
 .../modules/hdp-oozie/manifests/service.pp      |  223 ---
 .../hdp-oozie/templates/oozie-env.sh.erb        |   64 -
 .../templates/oozie-log4j.properties.erb        |   74 -
 .../puppet/modules/hdp-pig/files/pigSmoke.sh    |   18 -
 .../puppet/modules/hdp-pig/manifests/init.pp    |   76 -
 .../puppet/modules/hdp-pig/manifests/params.pp  |   24 -
 .../hdp-pig/manifests/pig/service_check.pp      |   70 -
 .../hdp-pig/templates/log4j.properties.erb      |   30 -
 .../modules/hdp-pig/templates/pig-env.sh.erb    |   17 -
 .../hdp-pig/templates/pig.properties.erb        |   55 -
 .../puppet/modules/hdp-repos/manifests/init.pp  |   21 -
 .../modules/hdp-repos/manifests/process_repo.pp |   42 -
 .../puppet/modules/hdp-repos/templates/repo.erb |   27 -
 .../puppet/modules/hdp-sqoop/manifests/init.pp  |  104 --
 .../hdp-sqoop/manifests/mysql-connector.pp      |   44 -
 .../modules/hdp-sqoop/manifests/params.pp       |   30 -
 .../hdp-sqoop/manifests/sqoop/service_check.pp  |   50 -
 .../hdp-sqoop/templates/sqoop-env.sh.erb        |   36 -
 .../hdp-templeton/files/templetonSmoke.sh       |   95 --
 .../modules/hdp-templeton/manifests/client.pp   |   40 -
 .../manifests/download-hive-tar.pp              |   46 -
 .../hdp-templeton/manifests/download-pig-tar.pp |   46 -
 .../modules/hdp-templeton/manifests/init.pp     |  103 --
 .../modules/hdp-templeton/manifests/params.pp   |   60 -
 .../modules/hdp-templeton/manifests/server.pp   |  134 --
 .../modules/hdp-templeton/manifests/service.pp  |   67 -
 .../manifests/templeton/service_check.pp        |   70 -
 .../hdp-templeton/templates/webhcat-env.sh.erb  |   44 -
 .../puppet/modules/hdp-tez/manifests/init.pp    |   24 -
 .../modules/hdp-tez/manifests/tez_client.pp     |   40 -
 .../files/validateYarnComponentStatus.py        |  165 ---
 .../modules/hdp-yarn/manifests/historyserver.pp |   48 -
 .../manifests/historyserver/service_check.pp    |   24 -
 .../puppet/modules/hdp-yarn/manifests/init.pp   |  166 ---
 .../hdp-yarn/manifests/mapred2/service_check.pp |   69 -
 .../hdp-yarn/manifests/mapreducev2_client.pp    |   43 -
 .../modules/hdp-yarn/manifests/nodemanager.pp   |   82 --
 .../modules/hdp-yarn/manifests/package.pp       |   28 -
 .../puppet/modules/hdp-yarn/manifests/params.pp |   59 -
 .../hdp-yarn/manifests/resourcemanager.pp       |   56 -
 .../manifests/resourcemanager/service_check.pp  |   24 -
 .../modules/hdp-yarn/manifests/service.pp       |  116 --
 .../modules/hdp-yarn/manifests/smoketest.pp     |   82 --
 .../hdp-yarn/manifests/yarn/service_check.pp    |   38 -
 .../modules/hdp-yarn/manifests/yarn_client.pp   |   34 -
 .../templates/container-executor.cfg.erb        |   22 -
 .../hdp-yarn/templates/mapreduce.conf.erb       |   17 -
 .../modules/hdp-yarn/templates/yarn-env.sh.erb  |  119 --
 .../modules/hdp-yarn/templates/yarn.conf.erb    |   17 -
 .../puppet/modules/hdp-zookeeper/files/zkEnv.sh |   96 --
 .../modules/hdp-zookeeper/files/zkServer.sh     |  120 --
 .../modules/hdp-zookeeper/files/zkService.sh    |   26 -
 .../modules/hdp-zookeeper/files/zkSmoke.sh      |   78 -
 .../modules/hdp-zookeeper/manifests/client.pp   |   47 -
 .../modules/hdp-zookeeper/manifests/init.pp     |  142 --
 .../modules/hdp-zookeeper/manifests/params.pp   |   45 -
 .../manifests/quorum/service_check.pp           |   61 -
 .../modules/hdp-zookeeper/manifests/service.pp  |   96 --
 .../manifests/zookeeper/service_check.pp        |   54 -
 .../templates/configuration.xsl.erb             |   24 -
 .../templates/log4j.properties.erb              |   71 -
 .../modules/hdp-zookeeper/templates/zoo.cfg.erb |   44 -
 .../templates/zookeeper-env.sh.erb              |   25 -
 .../templates/zookeeper_client_jaas.conf.erb    |    5 -
 .../templates/zookeeper_jaas.conf.erb           |    8 -
 .../modules/hdp/files/changeToSecureUid.sh      |   50 -
 .../parser/functions/hdp_args_as_array.rb       |   26 -
 .../functions/hdp_array_from_comma_list.rb      |   26 -
 .../parser/functions/hdp_calc_xmn_from_xms.rb   |   37 -
 .../functions/hdp_comma_list_from_array.rb      |   25 -
 .../lib/puppet/parser/functions/hdp_default.rb  |   50 -
 .../functions/hdp_escape_spec_characters.rb     |   28 -
 .../hdp/lib/puppet/parser/functions/hdp_fail.rb |   27 -
 .../functions/hdp_first_value_from_list.rb      |   25 -
 .../parser/functions/hdp_get_dir_from_url.rb    |   57 -
 .../hdp_get_directory_from_filepath.rb          |   31 -
 .../parser/functions/hdp_get_kinit_path.rb      |   36 -
 .../functions/hdp_get_major_stack_version.rb    |   28 -
 .../parser/functions/hdp_get_port_from_url.rb   |   44 -
 .../parser/functions/hdp_get_value_from_map.rb  |   37 -
 .../hdp/lib/puppet/parser/functions/hdp_host.rb |   28 -
 .../parser/functions/hdp_host_attribute.rb      |   49 -
 .../lib/puppet/parser/functions/hdp_is_empty.rb |   27 -
 .../lib/puppet/parser/functions/hdp_no_hosts.rb |   27 -
 .../puppet/parser/functions/hdp_option_value.rb |   32 -
 .../parser/functions/hdp_set_from_comma_list.rb |   34 -
 .../parser/functions/hdp_str_ends_with.rb       |   32 -
 .../puppet/parser/functions/hdp_template_var.rb |   38 -
 .../puppet/parser/functions/hdp_to_lowercase.rb |   48 -
 .../parser/functions/hdp_unique_id_and_date.rb  |   27 -
 .../hdp/lib/puppet/parser/functions/hdp_user.rb |   28 -
 .../puppet/modules/hdp/manifests/.directory     |   18 -
 .../puppet/modules/hdp/manifests/configfile.pp  |   98 --
 .../modules/hdp/manifests/download_keytabs.pp   |   43 -
 .../main/puppet/modules/hdp/manifests/init.pp   |  528 -------
 .../modules/hdp/manifests/java/jce/package.pp   |   62 -
 .../modules/hdp/manifests/java/package.pp       |   77 -
 .../puppet/modules/hdp/manifests/lzo/package.pp |   36 -
 .../modules/hdp/manifests/namenode-conn.pp      |   24 -
 .../puppet/modules/hdp/manifests/package.pp     |  156 --
 .../main/puppet/modules/hdp/manifests/params.pp |  794 -----------
 .../modules/hdp/manifests/snappy/package.pp     |   52 -
 .../main/puppet/modules/hdp/manifests/snmp.pp   |   53 -
 .../modules/hdp/manifests/testing_env_patch.pp  |   49 -
 .../puppet/modules/hdp/templates/snmpd.conf.erb |   48 -
 .../src/main/puppet/modules/puppetApply.sh      |   29 -
 .../src/main/puppet/modules/stdlib/CHANGELOG    |   20 -
 .../src/main/puppet/modules/stdlib/LICENSE      |  202 ---
 .../src/main/puppet/modules/stdlib/Modulefile   |   11 -
 .../main/puppet/modules/stdlib/README.markdown  |   45 -
 .../modules/stdlib/RELEASE_PROCESS.markdown     |   12 -
 .../lib/puppet/parser/functions/getvar.rb       |   23 -
 .../lib/puppet/parser/functions/has_key.rb      |   27 -
 .../lib/puppet/parser/functions/loadyaml.rb     |   20 -
 .../stdlib/lib/puppet/parser/functions/merge.rb |   28 -
 .../puppet/parser/functions/validate_bool.rb    |   39 -
 .../puppet/parser/functions/validate_hash.rb    |   37 -
 .../lib/puppet/parser/functions/validate_re.rb  |   35 -
 .../lib/puppet/provider/append_line/ruby.rb     |   15 -
 .../modules/stdlib/lib/puppet/type/anchor.rb    |   32 -
 .../stdlib/lib/puppet/type/append_line.rb       |   44 -
 .../puppet/modules/stdlib/manifests/init.pp     |   19 -
 .../puppet/modules/stdlib/manifests/stages.pp   |   43 -
 .../main/puppet/modules/stdlib/spec/spec.opts   |    6 -
 .../puppet/modules/stdlib/spec/spec_helper.rb   |   18 -
 .../unit/puppet/parser/functions/getvar_spec.rb |   53 -
 .../puppet/parser/functions/has_key_spec.rb     |   46 -
 .../unit/puppet/parser/functions/merge_spec.rb  |   54 -
 .../parser/functions/validate_bool_spec.rb      |   76 -
 .../parser/functions/validate_hash_spec.rb      |   63 -
 .../puppet/provider/append_line/ruby_spec.rb    |   30 -
 .../stdlib/spec/unit/puppet/type/anchor_spec.rb |   11 -
 .../spec/unit/puppet/type/append_line_spec.rb   |   24 -
 .../puppet/modules/stdlib/tests/append_line.pp  |    7 -
 .../main/puppet/modules/stdlib/tests/init.pp    |    1 -
 .../src/main/python/ambari_agent/ActionQueue.py |   66 +-
 .../main/python/ambari_agent/AmbariConfig.py    |    8 +-
 .../python/ambari_agent/CommandStatusDict.py    |    2 +-
 .../ambari_agent/CustomServiceOrchestrator.py   |  103 +-
 .../src/main/python/ambari_agent/Grep.py        |   13 -
 .../main/python/ambari_agent/PuppetExecutor.py  |  282 ----
 .../main/python/ambari_agent/RepoInstaller.py   |  109 --
 .../python/ambari_agent/manifestGenerator.py    |  396 -----
 .../src/main/python/ambari_agent/site.pp        |   92 --
 .../test/python/ambari_agent/TestActionQueue.py |  100 +-
 .../TestCustomServiceOrchestrator.py            |    5 +-
 .../src/test/python/ambari_agent/TestGrep.py    |    7 -
 .../ambari_agent/TestManifestGenerator.py       |  230 ---
 .../python/ambari_agent/TestPuppetExecutor.py   |  275 ----
 .../ambari_agent/TestPuppetExecutorManually.py  |   57 -
 .../python/ambari_agent/TestRepoInstaller.py    |   77 -
 .../ambari_agent/examples/ControllerTester.py   |   32 +-
 ambari-client/pom.xml                           |    1 -
 .../ambari/server/agent/ExecutionCommand.java   |    1 -
 .../ambari/server/agent/HeartbeatMonitor.java   |    1 -
 .../controller/AmbariActionExecutionHelper.java |    2 -
 .../AmbariCustomCommandExecutionHelper.java     |    3 -
 .../AmbariManagementControllerImpl.java         |    2 -
 .../server/state/CommandScriptDefinition.java   |    4 +-
 .../1.3.2/configs/default.hbasedecom.json       |    3 +-
 .../python/stacks/1.3.2/configs/default.json    |    3 +-
 .../1.3.2/configs/default.non_gmetad_host.json  |    3 +-
 .../python/stacks/1.3.2/configs/secured.json    |    3 +-
 .../1.3.2/configs/secured_no_jce_name.json      |    3 +-
 .../2.0.6/configs/default.hbasedecom.json       |    3 +-
 .../python/stacks/2.0.6/configs/default.json    |    3 +-
 .../2.0.6/configs/default.non_gmetad_host.json  |    3 +-
 .../stacks/2.0.6/configs/flume_target.json      |    3 +-
 .../python/stacks/2.0.6/configs/ha_default.json |    3 +-
 .../python/stacks/2.0.6/configs/ha_secured.json |    3 +-
 .../python/stacks/2.0.6/configs/secured.json    |    3 +-
 .../2.0.6/configs/secured_no_jce_name.json      |    3 +-
 .../test/python/stacks/2.1/configs/default.json |    3 +-
 .../test/python/stacks/2.1/configs/secured.json |    3 +-
 docs/src/site/apt/install-0.9.apt               |    4 +-
 docs/src/site/apt/install.apt                   |    4 +-
 381 files changed, 161 insertions(+), 26686 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/LICENSE.txt
----------------------------------------------------------------------
diff --git a/LICENSE.txt b/LICENSE.txt
index 795eb37..2b6abfc 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,4 +1,3 @@
-
                                  Apache License
                            Version 2.0, January 2004
                         http://www.apache.org/licenses/
@@ -209,16 +208,10 @@ notices and license terms. Your use of the source code for the these
 subcomponents is subject to the terms and conditions of the following
 licenses. 
 
-For the stdlib in puppet modules 
-
-Copyright (C) 2011 Puppet Labs Inc
-
 and some parts:
 
 Copyright (C) 2011 Krzysztof Wilczynski
 
-Puppet Labs can be contacted at: info@puppetlabs.com
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/conf/unix/ambari-agent.ini
----------------------------------------------------------------------
diff --git a/ambari-agent/conf/unix/ambari-agent.ini b/ambari-agent/conf/unix/ambari-agent.ini
index daf15c0..ac37f16 100644
--- a/ambari-agent/conf/unix/ambari-agent.ini
+++ b/ambari-agent/conf/unix/ambari-agent.ini
@@ -27,12 +27,6 @@ ping_port=8670
 cache_dir=/var/lib/ambari-agent/cache
 tolerate_download_failures=true
 
-[puppet]
-puppetmodules=/var/lib/ambari-agent/puppet
-ruby_home=/usr/lib/ambari-agent/lib/ruby-1.8.7-p370
-puppet_home=/usr/lib/ambari-agent/lib/puppet-2.7.9
-facter_home=/usr/lib/ambari-agent/lib/facter-1.6.10
-
 [command]
 maxretries=2
 sleepBetweenRetries=1

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-agent/pom.xml b/ambari-agent/pom.xml
index 87d3427..da88261 100644
--- a/ambari-agent/pom.xml
+++ b/ambari-agent/pom.xml
@@ -37,13 +37,10 @@
     <package.log.dir>/var/log/ambari-agent</package.log.dir>
     <package.pid.dir>/var/run/ambari-agent</package.pid.dir>
     <skipTests>false</skipTests>
-    <facter.tar>http://downloads.puppetlabs.com/facter/facter-1.6.10.tar.gz</facter.tar>
-    <puppet.tar>http://downloads.puppetlabs.com/puppet/puppet-2.7.9.tar.gz</puppet.tar>
     <agent.install.dir>/usr/lib/python2.6/site-packages/ambari_agent</agent.install.dir>
     <resmgmt.install.dir>/usr/lib/python2.6/site-packages/resource_management</resmgmt.install.dir>
     <common_functions.install.dir>/usr/lib/ambari-agent/lib/common_functions</common_functions.install.dir>
     <jinja.install.dir>/usr/lib/python2.6/site-packages/jinja2</jinja.install.dir>
-    <ruby.tar>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6/ruby-1.8.7-p370.tar.gz</ruby.tar>
     <lib.dir>/usr/lib/ambari-agent/lib</lib.dir>
     <python.ver>python &gt;= 2.6</python.ver>
     <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>
@@ -53,20 +50,6 @@
     <target.cache.dir>${project.build.directory}/cache/</target.cache.dir>
     <resource.keeper.script>${ambari.server.module}/src/main/python/ambari_server/resourceFilesKeeper.py</resource.keeper.script>
   </properties>
-  <profiles>
-    <profile>
-      <id>suse11</id>
-      <properties>
-        <ruby.tar>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11/ruby-1.8.7-p370.tar.gz</ruby.tar>
-      </properties>
-    </profile>
-    <profile>
-      <id>centos5</id>
-      <properties>
-        <ruby.tar>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5/ruby-1.8.7-p370.tar.gz</ruby.tar>
-      </properties>
-    </profile>
-  </profiles>
   <build>
     <plugins>
       <plugin>
@@ -131,7 +114,7 @@
                 <argument>unitTests.py</argument>
               </arguments>
               <environmentVariables>
-                <PYTHONPATH>${project.basedir}/../ambari-common/src/main/python/jinja2:${project.basedir}/../ambari-common/src/main/python/common_functions:${project.basedir}/../ambari-common/src/test/python:${project.basedir}/../ambari-common/src/main/python:${project.basedir}/src/main/python/ambari_agent:${project.basedir}/src/main/python/resource_management:${project.basedir}/src/test/python/ambari_agent:${project.basedir}/src/test/python/resource_management:${project.basedir}/src/main/python:${project.basedir}/../ambari-agent/src/main/puppet/modules/hdp-hadoop/files:$PYTHONPATH</PYTHONPATH>
+                <PYTHONPATH>${project.basedir}/../ambari-common/src/main/python/jinja2:${project.basedir}/../ambari-common/src/main/python/common_functions:${project.basedir}/../ambari-common/src/test/python:${project.basedir}/../ambari-common/src/main/python:${project.basedir}/src/main/python/ambari_agent:${project.basedir}/src/main/python/resource_management:${project.basedir}/src/test/python/ambari_agent:${project.basedir}/src/test/python/resource_management:${project.basedir}/src/main/python:${project.basedir}/../ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files:${project.basedir}/../ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files:$PYTHONPATH</PYTHONPATH>
               </environmentVariables>
               <skip>${skipTests}</skip>
             </configuration>
@@ -256,17 +239,6 @@
               </sources>
             </mapping>
             <mapping>
-              <directory>${lib.dir}</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>${project.build.directory}/lib</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
               <directory>${jinja.install.dir}</directory>
               <sources>
                 <source>
@@ -289,17 +261,6 @@
               </sources>
             </mapping>
             <mapping>
-              <directory>/var/lib/${project.artifactId}/puppet</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>src/main/puppet</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
               <directory>/etc/ambari-agent/conf</directory>
               <filemode>755</filemode>
               <username>root</username>
@@ -439,17 +400,6 @@
               </mapper>
             </data>
             <data>
-              <src>${project.build.directory}/lib</src>
-              <type>directory</type>
-              <mapper>
-                <type>perm</type>
-                <prefix>${lib.dir}</prefix>
-                <user>root</user>
-                <group>root</group>
-                <filemode>755</filemode>
-              </mapper>
-            </data>
-            <data>
               <src>${project.basedir}/../ambari-common/src/main/python/jinja2/jinja2</src>
               <excludes>${project.basedir}/../ambari-common/src/main/python/jinja2/jinja2/testsuite</excludes>
               <type>directory</type>
@@ -470,17 +420,6 @@
               </mapper>
             </data>
             <data>
-              <src>src/main/puppet</src>
-              <type>directory</type>
-              <mapper>
-                <type>perm</type>
-                <prefix>/var/lib/${project.artifactId}/puppet</prefix>
-                <user>root</user>
-                <group>root</group>
-                <filemode>755</filemode>
-              </mapper>
-            </data>
-            <data>
               <src>conf/unix/ambari-agent.ini</src>
               <type>file</type>
               <mapper>
@@ -600,39 +539,6 @@
         </configuration>
       </plugin>
       <plugin>
-        <groupId>com.github.goldin</groupId>
-        <artifactId>copy-maven-plugin</artifactId>
-        <version>0.2.5</version>
-        <executions>
-          <execution>
-            <id>create-archive</id>
-            <phase>package</phase>
-            <goals>
-              <goal>copy</goal>
-            </goals>
-            <configuration>
-              <resources>
-                <resource>
-                  <targetPath>${project.build.directory}/lib</targetPath>
-                  <file>${ruby.tar}</file>
-                  <unpack>true</unpack>
-                </resource>
-                <resource>
-                  <targetPath>${project.build.directory}/lib</targetPath>
-                  <file>${facter.tar}</file>
-                  <unpack>true</unpack>
-                </resource>
-                <resource>
-                  <targetPath>${project.build.directory}/lib</targetPath>
-                  <file>${puppet.tar}</file>
-                  <unpack>true</unpack>
-                </resource>
-              </resources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
         <artifactId>maven-resources-plugin</artifactId>
         <version>2.6</version>
         <executions>
@@ -694,7 +600,6 @@
             <exclude>src/test/python/ambari_agent/dummy_files/*</exclude>
             <exclude>src/test/python/ambari_agent/dummy*.txt</exclude>
             <exclude>src/main/python/ambari_agent/imports.txt</exclude>
-            <exclude>src/main/puppet/modules/stdlib/**</exclude>
             <exclude>**/*.erb</exclude>
             <exclude>**/*.json</exclude>
             <exclude>**/*.pydevproject</exclude>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/package/deb/control/postinst
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/package/deb/control/postinst b/ambari-agent/src/main/package/deb/control/postinst
index 0ddd842..69a09e8 100644
--- a/ambari-agent/src/main/package/deb/control/postinst
+++ b/ambari-agent/src/main/package/deb/control/postinst
@@ -21,9 +21,6 @@ if [ "$1" == "configure" ]; then  # Action is install
   update-rc.d ambari-agent defaults
 fi
 
-
-chmod 755 /usr/lib/ambari-agent/lib/facter-1.6.10/bin/facter /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/filebucket /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/pi /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppet /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppetdoc /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/ralsh /usr/lib/ambari-agent/lib/ruby-1.8.7-p370/bin/*
-
 BAK=/etc/ambari-agent/conf/ambari-agent.ini.old
 ORIG=/etc/ambari-agent/conf/ambari-agent.ini
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/package/rpm/postinstall.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/package/rpm/postinstall.sh b/ambari-agent/src/main/package/rpm/postinstall.sh
index da3ba0d..536375e 100644
--- a/ambari-agent/src/main/package/rpm/postinstall.sh
+++ b/ambari-agent/src/main/package/rpm/postinstall.sh
@@ -28,7 +28,6 @@ if [ "$1" -eq 2 ]; then # Action is upgrade
   fi
 fi
 
-chmod 755 /usr/lib/ambari-agent/lib/facter-1.6.10/bin/facter /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/filebucket /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/pi /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppet /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppetdoc /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/ralsh /usr/lib/ambari-agent/lib/ruby-1.8.7-p370/bin/*
 
 BAK=/etc/ambari-agent/conf/ambari-agent.ini.old
 ORIG=/etc/ambari-agent/conf/ambari-agent.ini

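With the copy-maven-plugin execution gone from the pom, the packages no longer unpack ${ruby.tar}, ${facter.tar} or ${puppet.tar} into target/lib, so both the deb postinst and the rpm postinstall.sh drop the chmod of the bundled facter/puppet/ruby binaries. A quick sanity check after upgrading an agent host could look like this Python sketch (illustrative only, not part of this patch):

    # Illustrative sanity check, not part of this patch: verify an upgraded
    # agent host no longer carries the previously bundled runtimes.
    import os

    LEFTOVERS = [
        "/usr/lib/ambari-agent/lib/ruby-1.8.7-p370",
        "/usr/lib/ambari-agent/lib/puppet-2.7.9",
        "/usr/lib/ambari-agent/lib/facter-1.6.10",
    ]

    stale = [path for path in LEFTOVERS if os.path.exists(path)]
    if stale:
        print "stale bundled runtimes still present: " + ", ".join(stale)
    else:
        print "no bundled Ruby/Puppet/facter payload found"
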
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/manifestloader/site.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/manifestloader/site.pp b/ambari-agent/src/main/puppet/manifestloader/site.pp
deleted file mode 100644
index 7a208a6..0000000
--- a/ambari-agent/src/main/puppet/manifestloader/site.pp
+++ /dev/null
@@ -1,48 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class manifestloader () {
-    file { '/etc/puppet/agent/modules.tgz':
-      ensure => present,
-      source => "puppet:///modules/catalog/modules.tgz",  
-      mode => '0755',
-    }
-
-    exec { 'untar_modules':
-      command => "rm -rf /etc/puppet/agent/modules ; tar zxf /etc/puppet/agent/modules.tgz -C /etc/puppet/agent/ --strip-components 3",
-      path    => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    } 
-
-    exec { 'puppet_apply':
-      command   => "sh /etc/puppet/agent/modules/puppetApply.sh",
-      timeout   => 1800,
-      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-      logoutput => "true"
-    }
-
-    File['/etc/puppet/agent/modules.tgz'] -> Exec['untar_modules'] -> Exec['puppet_apply']
-}
-
-node default {
- stage{1 :}
- class {'manifestloader': stage => 1}
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp b/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp
deleted file mode 100644
index 820126d..0000000
--- a/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp
+++ /dev/null
@@ -1,68 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-#
-# Generates xml configs from the given key-value hash maps
-#
-# Config file format:
-#
-# <configuration>
-#   <property>
-#     <name>name1</name><value>value1</value>
-#   </property>
-#     ..
-#   <property>
-#     <name>nameN</name><value>valueN</value>
-#   </property>
-# </configuration>
-#
-# Params:
-# - configname - name of the config file (class title by default)
-# - modulespath - modules path ('/etc/puppet/modules' by default)
-# - module - module name
-# - properties - set of the key-value pairs (puppet hash) which corresponds to property name - property value pairs of config file
-#
-# Note: Set correct $modulespath in the configgenerator (or pass it as parameter)
-#
-
-define configgenerator::configfile ($modulespath='/etc/puppet/modules', $filename, $module, $configuration, $owner = "root", $group = "root", $mode = undef) {
-  $configcontent = inline_template('<!--<%=Time.now.asctime %>-->
-  <configuration>
-  <% configuration.each do |key,value| -%>
-  <property>
-    <name><%=key %></name>
-    <value><%=value %></value>
-  </property>
-  <% end -%>
-</configuration>')
- 
-
-debug("Generating config: ${modulespath}/${filename}")
-
-file {"${modulespath}/${filename}":
-  ensure  => present,
-  content => $configcontent,
-  path => "${modulespath}/${filename}",
-  owner => $owner,
-  group => $group,
-  mode => $mode
-}
-} 

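The define deleted above rendered an XML <configuration> file from a Puppet hash through an inline ERB template. The same key/value-to-XML idea, sketched in Python for comparison (the names here are illustrative, not the agent's actual helper):

    # Minimal sketch of the configfile idea in Python; illustrative only.
    from xml.sax.saxutils import escape

    def render_config_xml(properties):
        lines = ["<configuration>"]
        for key, value in sorted(properties.items()):
            lines.append("  <property>")
            lines.append("    <name>%s</name>" % escape(str(key)))
            lines.append("    <value>%s</value>" % escape(str(value)))
            lines.append("  </property>")
        lines.append("</configuration>")
        return "\n".join(lines)

    # render_config_xml({"fs.default.name": "hdfs://nn:8020"}) yields the
    # same <name>/<value> layout the ERB template generated.
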
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/configgenerator/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/configgenerator/manifests/init.pp b/ambari-agent/src/main/puppet/modules/configgenerator/manifests/init.pp
deleted file mode 100644
index bac20c0..0000000
--- a/ambari-agent/src/main/puppet/modules/configgenerator/manifests/init.pp
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class configgenerator() {
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/dashboard/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/dashboard/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/dashboard/service_check.pp
deleted file mode 100644
index ebc52e1..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/dashboard/service_check.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-dashboard::dashboard::service_check(){}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/init.pp
deleted file mode 100644
index 0770252..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/init.pp
+++ /dev/null
@@ -1,76 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-dashboard(
-  $service_state = $hdp::params::cluster_client_state,
-  $opts = {}
-) inherits hdp-dashboard::params
-{
-   if ($service_state == 'no_op') {
-   } elsif ($service_state == 'uninstalled') {
-    hdp::package { 'dashboard' :
-      ensure => 'uninstalled',
-      java_needed => 'false',
-      size   => 64
-    }
-    hdp::directory_recursive_create { $conf_dir :
-      service_state => $service_state,
-      force => true
-    }
-
-    Hdp::Package['dashboard'] -> Hdp::Directory_recursive_create[$conf_dir]
-
-   } elsif ($service_state in ['running','installed_and_configured','stopped']) {
-      hdp::package { 'dashboard' :
-        java_needed => 'false',
-        size => 64
-       }
-     $conf_dir =  $hdp-dashboard::params::conf_dir
-  
-     hdp::directory_recursive_create { $conf_dir :
-       service_state => $service_state,
-       force => true
-     }
- 
-     hdp-dashboard::configfile { 'cluster_configuration.json' : }
-     Hdp-Dashboard::Configfile<||>{dashboard_host => $hdp::params::host_address}
-  
-     #top level does not need anchors
-     Hdp::Package['dashboard'] -> Hdp::Directory_recursive_create[$conf_dir] -> Hdp-Dashboard::Configfile<||> 
-    } else {
-     hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-   }
-}
-
-###config file helper
-define hdp-dashboard::configfile(
-  $dashboard_host = undef
-)
-{
-  
-  hdp::configfile { "${hdp-dashboard::params::conf_dir}/${name}":
-    component      => 'dashboard',
-    owner          => root,
-    group          => root,
-    dashboard_host => $dashboard_host
-  }
-}
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/params.pp
deleted file mode 100644
index b39d6b9..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/params.pp
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-dashboard::params()
-{
-  
-  $conf_dir = "/usr/share/hdp/dashboard/dataServices/conf/" #cannot change since hard coded in rpm
-
-  $hdp_cluster_name = hdp_default("hadoop/cluster_configuration/hdp_cluster_name")
-  $scheduler_name = hdp_default("hadoop/cluster_configuration/scheduler_name")
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb b/ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb
deleted file mode 100644
index 30a005f..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb
+++ /dev/null
@@ -1,97 +0,0 @@
-{
-  "config_version": 1,
-  "stack_version": "1.0.2",
-  "overall": {
-    "cluster_name": "<%=scope.function_hdp_template_var("hdp_cluster_name")%>",
-    "dashboard_host": "<%=scope.function_hdp_host("public_dashboard_host")%>",
-    "dashboard_port": 80,
-    "dataservices_host": "<%=scope.function_hdp_host("public_dashboard_host")%>",
-    "dataservices_port": 80,
-    "ganglia" : {
-      "web_host": "<%=scope.function_hdp_host("public_ganglia_server_host")%>",
-      "web_port": 80,
-      "web_root": "/ganglia/?t=yes",
-      "grid_name": "HDP_GRID"
-    },
-    "nagios": {
-      "nagiosserver_host": "<%=scope.function_hdp_host("public_nagios_server_host")%>",
-      "nagiosserver_port": 80,
-      "web_root": "/nagios"
-    },
-    "jmx": {
-      "timeout": 3
-    },
-    "services": {
-	  "HDFS" : [
-        {
-          "installed": true,
-          "name": "HDFS",
-          "namenode_host": "<%=scope.function_hdp_host("public_namenode_host")%>",
-          "namenode_port": 50070,
-          "snamenode_host": "<%=scope.function_hdp_host("public_snamenode_host")%>",
-          "snamenode_port": 50090,
-          "total_datanodes": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "namenode": "HDPNameNode"
-          }
-        }
-      ],
-      "MAPREDUCE" : [
-        {
-          "installed": true,
-          "name": "MAPREDUCE",
-          "jobtracker_host": "<%=scope.function_hdp_host("public_jtnode_host")%>",
-          "jobtracker_port": 50030,
-          "total_tasktrackers": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
-          "jobhistory_host": "<%=scope.function_hdp_host("public_jtnode_host")%>",
-          "jobhistory_port": 51111,
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "jobtracker": "HDPJobTracker"
-          },
-          "scheduler_type": "<%=scope.function_hdp_template_var("scheduler_name")%>"
-        }
-      ],
-      "HBASE" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_hbase_master_hosts")%>,
-          "name": "HBASE",
-          "hbasemasters_hosts": "<%=scope.function_hdp_host("public_hbase_master_hosts")%>",
-          "hbasemasters_port": 60010,
-          "total_regionservers": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "hbasemasters": "HDPHBaseMaster"
-          }
-        }
-      ],
-      "ZOOKEEPER" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_zookeeper_hosts")%>,
-          "name": "ZOOKEEPER"
-        }
-      ],
-      "HIVE" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_hive_server_host")%>,
-          "name": "HIVE"
-        }
-      ],
-      "TEMPLETON" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_webhcat_server_host")%>,
-          "name": "TEMPLETON"
-        }
-      ],
-      "OOZIE" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_oozie_server")%>,
-          "name": "OOZIE",
-          "oozie_host": "<%=scope.function_hdp_host("public_oozie_server")%>",
-          "oozie_port": 11000
-        }
-      ]
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-flume/files/flumeSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-flume/files/flumeSmoke.sh b/ambari-agent/src/main/puppet/modules/hdp-flume/files/flumeSmoke.sh
deleted file mode 100644
index 6bc9be4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-flume/files/flumeSmoke.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-echo "Flume Smoke Test: Passed" 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/client.pp b/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/client.pp
deleted file mode 100644
index e871533..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/client.pp
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-flume::client(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp::params
-{
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/init.pp
deleted file mode 100644
index 9bd3dad..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/init.pp
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-flume(
-  $type = server,
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-flume::params
-{
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/params.pp
deleted file mode 100644
index de6a2c5..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/params.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-flume::params() inherits hdp::params
-{
-  $flume_log_dir = hdp_default("flume_log_dir","/var/log/flume")
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/service.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/service.pp b/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/service.pp
deleted file mode 100644
index d1525eb..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-flume/manifests/service.pp
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-flume::service(
-  $ensure = $hdp::params::cluster_service_state
-)
-{
-}
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-flume/templates/flume-env.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-flume/templates/flume-env.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-flume/templates/flume-env.sh.erb
deleted file mode 100644
index 8aa5ca0..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-flume/templates/flume-env.sh.erb
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Enviroment variables can be set here.
-#
-# #JAVA_HOME=/usr/lib/jvm/java-6-sun
-#
-# # Give Flume more memory and pre-allocate, enable remote monitoring via JMX
-JAVA_OPTS="-Xms100m -Xmx200m -Dcom.sun.management.jmxremote -Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts=<%=scope.function_hdp_template_var("::hdp-flume::params::ganglia_sink")%>"
-#
-# # Note that the Flume conf directory is always included in the classpath.
-# #FLUME_CLASSPATH=""

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-flume/templates/log4j.properties.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-flume/templates/log4j.properties.erb b/ambari-agent/src/main/puppet/modules/hdp-flume/templates/log4j.properties.erb
deleted file mode 100644
index 5fec845..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-flume/templates/log4j.properties.erb
+++ /dev/null
@@ -1,59 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Define some default values that can be overridden by system properties.
-#
-# For testing, it may also be convenient to specify
-# -Dflume.root.logger=DEBUG,console when launching flume.
-
-#flume.root.logger=DEBUG,console
-flume.root.logger=INFO,LOGFILE
-flume.log.dir=<%=scope.function_hdp_template_var("::hdp-flume::params::flume_log_dir")%>
-flume.log.file=flume.log
-
-log4j.logger.org.apache.flume.lifecycle = INFO
-log4j.logger.org.jboss = WARN
-log4j.logger.org.mortbay = INFO
-log4j.logger.org.apache.avro.ipc.NettyTransceiver = WARN
-log4j.logger.org.apache.hadoop = INFO
-
-# Define the root logger to the system property "flume.root.logger".
-log4j.rootLogger=${flume.root.logger}
-
-#
-# Rolling file appender
-# Default log rotation configuration
-#
-
-log4j.appender.LOGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.LOGFILE.MaxFileSize=100MB
-log4j.appender.LOGFILE.MaxBackupIndex=10
-log4j.appender.LOGFILE.File=${flume.log.dir}/${flume.log.file}
-log4j.appender.LOGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.LOGFILE.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmetad.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmetad.sh
deleted file mode 100644
index e60eb31..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmetad.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# Before checking gmetad, check rrdcached.
-./checkRrdcached.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-if [ -n "${gmetadRunningPid}" ]
-then
-  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
-else
-  echo "Failed to find running ${GMETAD_BIN}";
-  exit 1;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmond.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmond.sh
deleted file mode 100644
index 0cec8dc..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmond.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function checkGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-    # Skip over (purported) Clusters that don't have their core conf file present.
-    if [ -e "${gmondCoreConfFileName}" ]
-    then 
-      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-      if [ -n "${gmondRunningPid}" ]
-      then
-        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
-      else
-        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
-        exit 1;
-      fi
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so check
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        checkGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just check the one ${gmondClusterName} that was asked for.
-    checkGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkRrdcached.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkRrdcached.sh
deleted file mode 100644
index d94db5d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkRrdcached.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-if [ -n "${rrdcachedRunningPid}" ]
-then
-  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
-else
-  echo "Failed to find running ${RRDCACHED_BIN}";
-  exit 1;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetad.init
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetad.init b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetad.init
deleted file mode 100644
index 20b388e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetad.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmetad startup script
-# processname: hdp-gmetad
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
-HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
-HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmetad..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmetad..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmetad..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetadLib.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetadLib.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetadLib.sh
deleted file mode 100644
index e28610e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetadLib.sh
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMETAD_BIN=/usr/sbin/gmetad;
-GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
-GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
-
-function getGmetadLoggedPid()
-{
-    if [ -e "${GMETAD_PID_FILE}" ]
-    then
-        echo `cat ${GMETAD_PID_FILE}`;
-    fi
-}
-
-function getGmetadRunningPid()
-{
-    gmetadLoggedPid=`getGmetadLoggedPid`;
-
-    if [ -n "${gmetadLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmetadConf()
-{
-    now=`date`;
-
-    cat <<END_OF_GMETAD_CONF_1
-#################### Generated by ${0} on ${now} ####################
-#
-#-------------------------------------------------------------------------------
-# Setting the debug_level to 1 will keep daemon in the forground and
-# show only error messages. Setting this value higher than 1 will make 
-# gmetad output debugging information and stay in the foreground.
-# default: 0
-# debug_level 10
-#
-#-------------------------------------------------------------------------------
-# What to monitor. The most important section of this file. 
-#
-# The data_source tag specifies either a cluster or a grid to
-# monitor. If we detect the source is a cluster, we will maintain a complete
-# set of RRD databases for it, which can be used to create historical 
-# graphs of the metrics. If the source is a grid (it comes from another gmetad),
-# we will only maintain summary RRDs for it.
-#
-# Format: 
-# data_source "my cluster" [polling interval] address1:port addreses2:port ...
-# 
-# The keyword 'data_source' must immediately be followed by a unique
-# string which identifies the source, then an optional polling interval in 
-# seconds. The source will be polled at this interval on average. 
-# If the polling interval is omitted, 15sec is asssumed. 
-#
-# If you choose to set the polling interval to something other than the default,
-# note that the web frontend determines a host as down if its TN value is less
-# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
-# to something around or greater than 80sec, this will cause the frontend to
-# incorrectly display hosts as down even though they are not.
-#
-# A list of machines which service the data source follows, in the 
-# format ip:port, or name:port. If a port is not specified then 8649
-# (the default gmond port) is assumed.
-# default: There is no default value
-#
-# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
-# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
-# data_source "another source" 1.3.4.7:8655  1.3.4.8
-END_OF_GMETAD_CONF_1
-
-    # Get info about all the configured Ganglia clusters.
-    getGangliaClusterInfo | while read gangliaClusterInfoLine
-    do
-        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
-        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
-        # ...and generate a corresponding data_source line for gmetad.conf. 
-        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
-    done
-
-    cat <<END_OF_GMETAD_CONF_2
-#
-# Round-Robin Archives
-# You can specify custom Round-Robin archives here (defaults are listed below)
-#
-# Old Default RRA: Keep 1 hour of metrics at 15 second resolution. 1 day at 6 minute
-# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-#      "RRA:AVERAGE:0.5:5760:374"
-# New Default RRA
-# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
-# Two weeks of data points at 1 minute resolution (average)
-#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
-# Retaining existing resolution
-RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-     "RRA:AVERAGE:0.5:5760:374"
-#
-#-------------------------------------------------------------------------------
-# Scalability mode. If on, we summarize over downstream grids, and respect
-# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
-# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
-# we are the "authority" on data source feeds. This approach does not scale to
-# large groups of clusters, but is provided for backwards compatibility.
-# default: on
-# scalable off
-#
-#-------------------------------------------------------------------------------
-# The name of this Grid. All the data sources above will be wrapped in a GRID
-# tag with this name.
-# default: unspecified
-gridname "HDP_GRID"
-#
-#-------------------------------------------------------------------------------
-# The authority URL for this grid. Used by other gmetads to locate graphs
-# for our data sources. Generally points to a ganglia/
-# website on this machine.
-# default: "http://hostname/ganglia/",
-#   where hostname is the name of this machine, as defined by gethostname().
-# authority "http://mycluster.org/newprefix/"
-#
-#-------------------------------------------------------------------------------
-# List of machines this gmetad will share XML with. Localhost
-# is always trusted. 
-# default: There is no default value
-# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
-#
-#-------------------------------------------------------------------------------
-# If you want any host which connects to the gmetad XML to receive
-# data, then set this value to "on"
-# default: off
-# all_trusted on
-#
-#-------------------------------------------------------------------------------
-# If you don't want gmetad to setuid then set this to off
-# default: on
-# setuid off
-#
-#-------------------------------------------------------------------------------
-# User gmetad will setuid to (defaults to "nobody")
-# default: "nobody"
-setuid_username "${GMETAD_USER}"
-#
-#-------------------------------------------------------------------------------
-# Umask to apply to created rrd files and grid directory structure
-# default: 0 (files are public)
-# umask 022
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer requests for XML
-# default: 8651
-# xml_port 8651
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer queries for XML. This facility allows
-# simple subtree and summation views of the XML tree.
-# default: 8652
-# interactive_port 8652
-#
-#-------------------------------------------------------------------------------
-# The number of threads answering XML requests
-# default: 4
-# server_threads 10
-#
-#-------------------------------------------------------------------------------
-# Where gmetad stores its round-robin databases
-# default: "/var/lib/ganglia/rrds"
-# rrd_rootdir "/some/other/place"
-#
-#-------------------------------------------------------------------------------
-# In earlier versions of gmetad, hostnames were handled in a case
-# sensitive manner
-# If your hostname directories have been renamed to lower case,
-# set this option to 0 to disable backward compatibility.
-# From version 3.2, backwards compatibility will be disabled by default.
-# default: 1   (for gmetad < 3.2)
-# default: 0   (for gmetad >= 3.2)
-case_sensitive_hostnames 1
-END_OF_GMETAD_CONF_2
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmond.init
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmond.init b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmond.init
deleted file mode 100644
index afb7026..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmond.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmond startup script
-# processname: hdp-gmond
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
-HDP_GANLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
-HDP_GANLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmond..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMOND_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmond..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMOND_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmond..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMOND_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL


[02/15] AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
index bc675dc..c9d3200 100644
--- a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
+++ b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
@@ -27,7 +27,6 @@ import os
 
 from LiveStatus import LiveStatus
 from shell import shellRunner
-import PuppetExecutor
 from ActualConfigHandler import ActualConfigHandler
 from CommandStatusDict import CommandStatusDict
 from CustomServiceOrchestrator import CustomServiceOrchestrator
@@ -62,9 +61,6 @@ class ActionQueue(threading.Thread):
   COMPLETED_STATUS = 'COMPLETED'
   FAILED_STATUS = 'FAILED'
 
-  COMMAND_FORMAT_V1 = "1.0"
-  COMMAND_FORMAT_V2 = "2.0"
-
   def __init__(self, config, controller):
     super(ActionQueue, self).__init__()
     self.commandQueue = Queue.Queue()
@@ -140,33 +136,17 @@ class ActionQueue(threading.Thread):
       traceback.print_exc()
       logger.warn(err)
 
-
-  def determine_command_format_version(self, command):
-    """
-    Returns either COMMAND_FORMAT_V1 or COMMAND_FORMAT_V2
-    """
-    try:
-      if command['commandParams']['schema_version'] == self.COMMAND_FORMAT_V2:
-        return self.COMMAND_FORMAT_V2
-      else:
-        return  self.COMMAND_FORMAT_V1
-    except KeyError:
-      pass # ignore
-    return self.COMMAND_FORMAT_V1 # Fallback
-
-
   def execute_command(self, command):
     '''
     Executes commands of type  EXECUTION_COMMAND
     '''
     clusterName = command['clusterName']
     commandId = command['commandId']
-    command_format = self.determine_command_format_version(command)
 
     message = "Executing command with id = {commandId} for role = {role} of " \
-              "cluster {cluster}. Command format={command_format}".format(
+              "cluster {cluster}.".format(
               commandId = str(commandId), role=command['role'],
-              cluster=clusterName, command_format=command_format)
+              cluster=clusterName)
     logger.info(message)
     logger.debug(pprint.pformat(command))
 
@@ -181,24 +161,13 @@ class ActionQueue(threading.Thread):
     })
     self.commandStatuses.put_command_status(command, in_progress_status)
     # running command
-    if command_format == self.COMMAND_FORMAT_V1:
-      # Create a new instance of executor for the current thread
-      puppetExecutor = PuppetExecutor.PuppetExecutor(
-        self.config.get('puppet', 'puppetmodules'),
-        self.config.get('puppet', 'puppet_home'),
-        self.config.get('puppet', 'facter_home'),
-        self.config.get('agent', 'prefix'), self.config)
-      commandresult = puppetExecutor.runCommand(command, in_progress_status['tmpout'],
-        in_progress_status['tmperr'])
-    else:
-      commandresult = self.customServiceOrchestrator.runCommand(command,
-        in_progress_status['tmpout'], in_progress_status['tmperr'])
+    commandresult = self.customServiceOrchestrator.runCommand(command,
+      in_progress_status['tmpout'], in_progress_status['tmperr'])
     # dumping results
     status = self.COMPLETED_STATUS
     if commandresult['exitcode'] != 0:
       status = self.FAILED_STATUS
     roleResult = self.commandStatuses.generate_report_template(command)
-    # assume some puppet plumbing to run these commands
     roleResult.update({
       'stdout': commandresult['stdout'],
       'stderr': commandresult['stderr'],
@@ -252,25 +221,22 @@ class ActionQueue(threading.Thread):
       else:
         globalConfig = {}
 
-      command_format = self.determine_command_format_version(command)
-
       livestatus = LiveStatus(cluster, service, component,
                               globalConfig, self.config, self.configTags)
 
-      component_status = None
       component_extra = None
-      if command_format == self.COMMAND_FORMAT_V2:
-        # For custom services, responsibility to determine service status is
-        # delegated to python scripts
-        component_status_result = self.customServiceOrchestrator.requestComponentStatus(command)
-
-        if component_status_result['exitcode'] == 0:
-          component_status = LiveStatus.LIVE_STATUS
-        else:
-          component_status = LiveStatus.DEAD_STATUS
-
-        if component_status_result.has_key('structuredOut'):
-          component_extra = component_status_result['structuredOut']
+
+      # For custom services, responsibility to determine service status is
+      # delegated to python scripts
+      component_status_result = self.customServiceOrchestrator.requestComponentStatus(command)
+
+      if component_status_result['exitcode'] == 0:
+        component_status = LiveStatus.LIVE_STATUS
+      else:
+        component_status = LiveStatus.DEAD_STATUS
+
+      if component_status_result.has_key('structuredOut'):
+        component_extra = component_status_result['structuredOut']
 
       result = livestatus.build(forsed_component_status= component_status)
 

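After this hunk every execution command goes through CustomServiceOrchestrator.runCommand and every status command through requestComponentStatus; only the script's exit code decides liveness. The same mapping, restated as a one-liner for readability (no new behavior):

    # Same mapping as the hunk above, factored out for readability.
    from LiveStatus import LiveStatus

    def status_from_exitcode(exitcode):
        # 0 -> component reported live, anything else -> dead
        return LiveStatus.LIVE_STATUS if exitcode == 0 else LiveStatus.DEAD_STATUS
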
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py b/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
index 79c7345..4f0baa9 100644
--- a/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
+++ b/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
@@ -38,12 +38,6 @@ cache_dir=/var/lib/ambari-agent/cache
 
 [services]
 
-[puppet]
-puppetmodules=/var/lib/ambari-agent/puppet/
-puppet_home=/root/workspace/puppet-install/puppet-2.7.9
-facter_home=/root/workspace/puppet-install/facter-1.6.10
-timeout_seconds = 600
-
 [python]
 custom_actions_dir = /var/lib/ambari-agent/resources/custom_actions
 
@@ -59,7 +53,7 @@ passphrase_env_var_name=AMBARI_PASSPHRASE
 [heartbeat]
 state_interval = 6
 dirs=/etc/hadoop,/etc/hadoop/conf,/var/run/hadoop,/var/log/hadoop
-rpms=glusterfs,openssl,wget,net-snmp,ntpd,ruby,ganglia,nagios,glusterfs
+rpms=glusterfs,openssl,wget,net-snmp,ntpd,ganglia,nagios
 log_lines_count=300
 
 """

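For reference, the [heartbeat] rpms value above is a plain comma-separated
list read through ConfigParser. A small sketch of how an agent-side consumer
might split it (the sample string is abbreviated to the relevant section;
this is not the agent's actual parsing code):

  import ConfigParser
  import StringIO

  sample = "[heartbeat]\nrpms=glusterfs,openssl,wget,net-snmp,ntpd,ganglia,nagios\n"
  config = ConfigParser.RawConfigParser()
  config.readfp(StringIO.StringIO(sample))
  # Each entry names a package whose presence the heartbeat reports on
  rpms = [name.strip() for name in config.get('heartbeat', 'rpms').split(',')]
  print rpms
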
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/python/ambari_agent/CommandStatusDict.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/CommandStatusDict.py b/ambari-agent/src/main/python/ambari_agent/CommandStatusDict.py
index e491e09..f00ada2 100644
--- a/ambari-agent/src/main/python/ambari_agent/CommandStatusDict.py
+++ b/ambari-agent/src/main/python/ambari_agent/CommandStatusDict.py
@@ -112,7 +112,7 @@ class CommandStatusDict():
     output = grep.tail(tmpout, Grep.OUTPUT_LAST_LINES)
     inprogress = self.generate_report_template(command)
     inprogress.update({
-      'stdout': grep.filterMarkup(output),
+      'stdout': output,
       'stderr': tmperr,
       'structuredOut': tmpstructuredout,
       'exitCode': 777,

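With the markup filter gone, the in-progress report above ships the tail of
stdout verbatim. A standalone sketch mirroring what Grep.tail keeps (the
sample text is invented):

  def tail(text, n):
    # Keep only the last n lines, mirroring Grep.tail from the agent
    lines = text.splitlines(True)
    return "".join(lines[-n:])

  print tail("line1\nline2\nline3\n", 2)  # keeps "line2\nline3\n"
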
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
index c68a953..e96deb5 100644
--- a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
+++ b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
@@ -26,10 +26,7 @@ import sys
 from FileCache import FileCache
 from AgentException import AgentException
 from PythonExecutor import PythonExecutor
-from AmbariConfig import AmbariConfig
 import hostname
-from LiveStatus import LiveStatus
-import manifestGenerator
 
 
 logger = logging.getLogger()
@@ -48,6 +45,9 @@ class CustomServiceOrchestrator():
   PRE_HOOK_PREFIX="before"
   POST_HOOK_PREFIX="after"
 
+  HOSTS_LIST_KEY = "all_hosts"
+  PING_PORTS_KEY = "all_ping_ports"
+  AMBARI_SERVER_HOST = "ambari_server_host"
 
   def __init__(self, config, controller):
     self.config = config
@@ -197,7 +197,6 @@ class CustomServiceOrchestrator():
     Converts command to json file and returns file path
     """
-    # Perform few modifications to stay compatible with the way in which
-    # site.pp files are generated by manifestGenerator.py
+    # Perform a few modifications to the command before dumping it to JSON
     public_fqdn = self.public_fqdn
     command['public_hostname'] = public_fqdn
     # Now, dump the json file
@@ -209,7 +208,7 @@ class CustomServiceOrchestrator():
       file_path = os.path.join(self.tmp_dir, "status_command.json")
     else:
       task_id = command['taskId']
-      command['clusterHostInfo'] = manifestGenerator.decompressClusterHostInfo(command['clusterHostInfo'])
+      command['clusterHostInfo'] = self.decompressClusterHostInfo(command['clusterHostInfo'])
       file_path = os.path.join(self.tmp_dir, "command-{0}.json".format(task_id))
     # Json may contain passwords, that's why we need proper permissions
     if os.path.isfile(file_path):
@@ -219,3 +218,74 @@
       content = json.dumps(command, sort_keys = False, indent = 4)
       f.write(content)
     return file_path
+
+  def decompressClusterHostInfo(self, clusterHostInfo):
+    info = clusterHostInfo.copy()
+    #Pop info not related to host roles
+    hostsList = info.pop(self.HOSTS_LIST_KEY)
+    pingPorts = info.pop(self.PING_PORTS_KEY)
+    ambariServerHost = info.pop(self.AMBARI_SERVER_HOST)
+
+    decompressedMap = {}
+
+    for k,v in info.items():
+      # Convert from 1-3,5,6-8 to [1,2,3,5,6,7,8]
+      indexes = self.convertRangeToList(v)
+      # Convert from [1,2,3,5,6,7,8] to [host1,host2,host3...]
+      decompressedMap[k] = [hostsList[i] for i in indexes]
+
+    #Convert from ['1:0-2,4', '42:3,5-7'] to [1,1,1,42,1,42,42,42]
+    pingPorts = self.convertMappedRangeToList(pingPorts)
+
+    #Convert all elements to str
+    pingPorts = map(str, pingPorts)
+
+    #Add ping ports to result
+    decompressedMap[self.PING_PORTS_KEY] = pingPorts
+    #Add hosts list to result
+    decompressedMap[self.HOSTS_LIST_KEY] = hostsList
+    #Add ambari-server host to result
+    decompressedMap[self.AMBARI_SERVER_HOST] = ambariServerHost
+
+    return decompressedMap
+
+  # Converts from 1-3,5,6-8 to [1,2,3,5,6,7,8]
+  def convertRangeToList(self, ranges):
+    resultList = []
+    for i in ranges:
+      for r in i.split(','):
+        rangeBounds = r.split('-')
+        if len(rangeBounds) == 2:
+          if not rangeBounds[0] or not rangeBounds[1]:
+            raise AgentException("Broken data in given range, expected 'm-n' or 'm', got: " + str(r))
+          resultList.extend(range(int(rangeBounds[0]), int(rangeBounds[1]) + 1))
+        elif len(rangeBounds) == 1:
+          resultList.append(int(rangeBounds[0]))
+        else:
+          raise AgentException("Broken data in given range, expected 'm-n' or 'm', got: " + str(r))
+    return resultList
+
+  # Converts from ['1:0-2,4', '42:3,5-7'] to [1,1,1,42,1,42,42,42]
+  def convertMappedRangeToList(self, mappedRanges):
+    resultDict = {}
+    for i in mappedRanges:
+      valueToRanges = i.split(":")
+      if len(valueToRanges) != 2:
+        raise AgentException("Broken data in given value to range, expected format 'value:m-n', got: " + str(i))
+      value = valueToRanges[0]
+      rangesToken = valueToRanges[1]
+      for r in rangesToken.split(','):
+        rangeIndexes = r.split('-')
+        if len(rangeIndexes) == 2:
+          if not rangeIndexes[0] or not rangeIndexes[1]:
+            raise AgentException("Broken data in given value to range, expected format 'value:m-n', got: " + str(r))
+          start = int(rangeIndexes[0])
+          end = int(rangeIndexes[1])
+          for k in range(start, end + 1):
+            resultDict[k] = int(value)
+        elif len(rangeIndexes) == 1:
+          resultDict[int(rangeIndexes[0])] = int(value)
+    # Sort by index explicitly; a plain dict does not guarantee ordering here
+    resultList = [resultDict[key] for key in sorted(resultDict.keys())]
+    return resultList
+

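The compressed clusterHostInfo format handled by decompressClusterHostInfo
above is easiest to see with data in hand. A standalone sketch of the index
expansion, using an invented three-host cluster (expand_ranges condenses the
convertRangeToList logic; it is not the agent's API):

  def expand_ranges(ranges):
    # "1-3,5" style tokens -> [1, 2, 3, 5], as in convertRangeToList above
    out = []
    for token in ranges:
      for part in token.split(','):
        bounds = part.split('-')
        if len(bounds) == 2:
          out.extend(range(int(bounds[0]), int(bounds[1]) + 1))
        else:
          out.append(int(bounds[0]))
    return out

  all_hosts = ['h1.example.com', 'h2.example.com', 'h3.example.com']
  compressed_datanodes = ['0-1']  # indexes into all_hosts
  print [all_hosts[i] for i in expand_ranges(compressed_datanodes)]
  # ['h1.example.com', 'h2.example.com']
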
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/python/ambari_agent/Grep.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/Grep.py b/ambari-agent/src/main/python/ambari_agent/Grep.py
index fcd7b1f..1aaf40d 100644
--- a/ambari-agent/src/main/python/ambari_agent/Grep.py
+++ b/ambari-agent/src/main/python/ambari_agent/Grep.py
@@ -13,8 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import re
-
 class Grep:
 
   # How many lines from command output send to server
@@ -76,14 +74,3 @@ class Grep:
       length = len(lines)
       tailed = lines[length - n:]
       return "".join(tailed)
-
-  def filterMarkup(self, string):
-    """
-    Filters given string from puppet colour markup done using escape codes like 
-    """
-    if string is None:
-      result = None
-    else:
-      regexp = "\x1b" + r"\[[\d;]{1,4}m"
-      result = re.sub(regexp, '', string)
-    return result

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/python/ambari_agent/PuppetExecutor.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/PuppetExecutor.py b/ambari-agent/src/main/python/ambari_agent/PuppetExecutor.py
deleted file mode 100644
index 7164a9f..0000000
--- a/ambari-agent/src/main/python/ambari_agent/PuppetExecutor.py
+++ /dev/null
@@ -1,282 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import json
-import os.path
-import logging
-import subprocess
-import pprint
-import threading
-from threading import Thread
-
-from shell import shellRunner
-import manifestGenerator
-from RepoInstaller import RepoInstaller
-from Grep import Grep
-import shell
-
-JAVANOTVALID_MSG = "Cannot access JDK! Make sure you have permission to execute {0}/bin/java"
-
-logger = logging.getLogger()
-
-class PuppetExecutor:
-
-  """ Class that executes the commands that come from the server using puppet.
-  This is the class that provides the pluggable point for executing the puppet"""
-
-  grep = Grep()
-  NO_ERROR = "none"
-
-  def __init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config):
-    self.puppetModule = puppetModule
-    self.puppetInstall = puppetInstall
-    self.facterInstall = facterInstall
-    self.tmpDir = tmpDir
-    self.reposInstalled = False
-    self.config = config
-    self.modulesdir = self.puppetModule + "/modules"
-    self.event = threading.Event()
-    self.last_puppet_has_been_killed = False
-    self.sh = shellRunner()
-    self.puppet_timeout = config.get("puppet", "timeout_seconds")
-
-  def configureEnviron(self, environ):
-    if not self.config.has_option("puppet", "ruby_home"):
-      return environ
-    ruby_home = self.config.get("puppet", "ruby_home")
-    if os.path.exists(ruby_home):
-      """Only update ruby home if the config is configured"""
-      path = os.environ["PATH"]
-      if not ruby_home in path:
-        environ["PATH"] = ruby_home + os.path.sep + "bin"  + ":"+environ["PATH"] 
-      environ["MY_RUBY_HOME"] = ruby_home
-    return environ
-    
-  def getPuppetBinary(self):
-    puppetbin = os.path.join(self.puppetInstall, "bin", "puppet") 
-    if os.path.exists(puppetbin):
-      return puppetbin
-    else:
-      logger.info("Using default puppet on the host : " + puppetbin 
-                  + " does not exist.")
-      return "puppet"
-
-  def discardInstalledRepos(self):
-    """
-    Makes agent to forget about installed repos.
-    So the next call of generate_repo_manifests() will definitely
-    install repos again
-    """
-    self.reposInstalled = False
-
-  def generate_repo_manifests(self, command, tmpDir, modulesdir, taskId):
-    # Hack to only create the repo files once
-    manifest_list = []
-    if not self.reposInstalled:
-      repoInstaller = RepoInstaller(command, tmpDir, modulesdir, taskId, self.config)
-      manifest_list = repoInstaller.generate_repo_manifests()
-    return manifest_list
-
-  def puppetCommand(self, sitepp):
-    modules = self.puppetModule
-    puppetcommand = [self.getPuppetBinary(), "apply", "--confdir=" + modules, "--detailed-exitcodes", sitepp]
-    return puppetcommand
-  
-  def facterLib(self):
-    return self.facterInstall + "/lib/"
-    pass
-  
-  def puppetLib(self):
-    return self.puppetInstall + "/lib"
-    pass
-
-  def condenseOutput(self, stdout, stderr, retcode):
-    grep = self.grep
-    if stderr == self.NO_ERROR:
-      result = grep.tail(stdout, grep.OUTPUT_LAST_LINES)
-    else:
-      result = grep.grep(stdout, "fail", grep.ERROR_LAST_LINES_BEFORE, grep.ERROR_LAST_LINES_AFTER)
-      result = grep.cleanByTemplate(result, "warning")
-      if result is None: # Second try
-       result = grep.grep(stdout, "err", grep.ERROR_LAST_LINES_BEFORE, grep.ERROR_LAST_LINES_AFTER)
-       result = grep.cleanByTemplate(result, "warning")
-    filteredresult = grep.filterMarkup(result)
-    return filteredresult
-
-  def isSuccessfull(self, returncode):
-    return not self.last_puppet_has_been_killed and (returncode == 0 or returncode == 2)
-
-  def run_manifest(self, command, file, tmpoutfile, tmperrfile):
-    result = {}
-    taskId = 0
-    timeout = command ['commandParams']['command_timeout']
-    if command.has_key("taskId"):
-      taskId = command['taskId']
-    puppetEnv = os.environ
-    #Install repos
-    repo_manifest_list = self.generate_repo_manifests(command, self.tmpDir,
-                                                      self.modulesdir, taskId)
-    puppetFiles = list(repo_manifest_list)
-    puppetFiles.append(file)
-    #Run all puppet commands, from manifest generator and for repos installation
-    #Appending outputs and errors, exitcode - maximal from all
-    for puppetFile in puppetFiles:
-      self.runPuppetFile(puppetFile, result, puppetEnv, tmpoutfile,
-                         tmperrfile, timeout)
-      # Check if one of the puppet command fails and error out
-      if not self.isSuccessfull(result["exitcode"]):
-        break
-
-    if self.isSuccessfull(result["exitcode"]):
-      # Check if all the repos were installed or not and reset the flag
-      self.reposInstalled = True
-
-    logger.info("ExitCode : "  + str(result["exitcode"]))
-    return result
-  
-  def isJavaAvailable(self, command):
-    javaExecutablePath = "{0}/bin/java".format(command)
-    return not self.sh.run([javaExecutablePath, '-version'])['exitCode']
-
-  def runCommand(self, command, tmpoutfile, tmperrfile):
-    # After installing we must have jdk available for start/stop/smoke
-    if command['roleCommand'] != "INSTALL":
-      java64_home = None
-      if 'hostLevelParams' in command and 'java_home' in command['hostLevelParams']:
-        java64_home = str(command['hostLevelParams']['java_home']).strip()
-      if java64_home is None or not self.isJavaAvailable(java64_home):
-        if java64_home is None:
-          errMsg = "Cannot access JDK! Make sure java_home is specified in hostLevelParams"
-        else:
-          errMsg = JAVANOTVALID_MSG.format(java64_home)
-        return {'stdout': '', 'stderr': errMsg, 'exitcode': 1}
-      pass
-    pass
-
-    taskId = 0
-    if command.has_key("taskId"):
-      taskId = command['taskId']
-    siteppFileName = os.path.join(self.tmpDir, "site-" + str(taskId) + ".pp")
-    errMsg = manifestGenerator.generateManifest(command, siteppFileName,
-                                                self.modulesdir, self.config)
-    if not errMsg:
-      result = self.run_manifest(command, siteppFileName, tmpoutfile, tmperrfile)
-    else:
-      result = {'stdout': '', 'stderr': errMsg, 'exitcode': 1}
-    return result
-
-  def runPuppetFile(self, puppetFile, result, puppetEnv, tmpoutfile,
-                    tmperrfile, timeout):
-    """ Run the command and make sure the output gets propagated"""
-    puppetcommand = self.puppetCommand(puppetFile)
-    rubyLib = ""
-    if os.environ.has_key("RUBYLIB"):
-      rubyLib = os.environ["RUBYLIB"]
-      logger.debug("RUBYLIB from Env " + rubyLib)
-    if not (self.facterLib() in rubyLib):
-      rubyLib = rubyLib + ":" + self.facterLib()
-    if not (self.puppetLib() in rubyLib):
-      rubyLib = rubyLib + ":" + self.puppetLib()
-    tmpout =  open(tmpoutfile, 'w')
-    tmperr =  open(tmperrfile, 'w')
-    puppetEnv["RUBYLIB"] = rubyLib
-    puppetEnv = self.configureEnviron(puppetEnv)
-    logger.debug("Setting RUBYLIB as: " + rubyLib)
-    logger.info("Running command " + pprint.pformat(puppetcommand))
-    puppet = self.lauch_puppet_subprocess(puppetcommand, tmpout, tmperr, puppetEnv)
-    logger.info("Command started with PID: " + str(puppet.pid))
-    logger.debug("Launching watchdog thread")
-    self.event.clear()
-    self.last_puppet_has_been_killed = False
-    thread = Thread(target =  self.puppet_watchdog_func,
-                    args = (puppet, timeout))
-    thread.start()
-    # Waiting for process to finished or killed
-    puppet.communicate()
-    self.event.set()
-    thread.join()
-    # Building results
-    error = self.NO_ERROR
-    returncode = 0
-    if not self.isSuccessfull(puppet.returncode):
-      returncode = puppet.returncode
-      error = open(tmperrfile, 'r').read()
-      logging.error("Error running puppet: \n" + str(error))
-      pass
-    if self.last_puppet_has_been_killed:
-      error = str(error) + "\n Puppet has been killed due to timeout"
-      returncode = 999
-    if result.has_key("stderr"):
-      result["stderr"] = result["stderr"] + os.linesep + str(error)
-    else:
-      result["stderr"] = str(error)
-    puppetOutput = open(tmpoutfile, 'r').read()
-    logger.debug("Output from puppet :\n" + puppetOutput)
-    logger.info("Puppet execution process with pid %s exited with code %s." %
-                (str(puppet.pid), str(returncode)))
-    if result.has_key("exitcode"):
-      result["exitcode"] = max(returncode, result["exitcode"])
-    else:
-      result["exitcode"] = returncode
-    condensed = self.condenseOutput(puppetOutput, error, returncode)
-    if result.has_key("stdout"):
-      result["stdout"] = result["stdout"] + os.linesep + str(condensed)
-    else:
-      result["stdout"] = str(condensed)
-    return result
-
-  def lauch_puppet_subprocess(self, puppetcommand, tmpout, tmperr, puppetEnv):
-    """
-    Creates subprocess with given parameters. This functionality was moved to separate method
-    to make possible unit testing
-    """
-    return subprocess.Popen(puppetcommand,
-      stdout=tmpout,
-      stderr=tmperr,
-      env=puppetEnv)
-
-  def puppet_watchdog_func(self, puppet, puppet_timeout):
-    self.event.wait(float(puppet_timeout))
-    if puppet.returncode is None:
-      logger.error("Task timed out, killing process with PID: " + str(puppet.pid))
-      shell.kill_process_with_children(puppet.pid)
-      self.last_puppet_has_been_killed = True
-    pass
-
-
-def main():
-  logging.basicConfig(level=logging.DEBUG)    
-  #test code
-  jsonFile = open('test.json', 'r')
-  jsonStr = jsonFile.read() 
-  # Below is for testing only.
-  
-  puppetInstance = PuppetExecutor("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
-                                  "/usr/",
-                                  "/root/workspace/puppet-install/facter-1.6.10/",
-                                  "/tmp")
-  jsonFile = open('test.json', 'r')
-  jsonStr = jsonFile.read() 
-  parsedJson = json.loads(jsonStr)
-  result = puppetInstance.runCommand(parsedJson, '/tmp/out.txt', '/tmp/err.txt')
-  logger.debug(result)
-  
-if __name__ == '__main__':
-  main()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/python/ambari_agent/RepoInstaller.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/RepoInstaller.py b/ambari-agent/src/main/python/ambari_agent/RepoInstaller.py
deleted file mode 100644
index 78e7dc2..0000000
--- a/ambari-agent/src/main/python/ambari_agent/RepoInstaller.py
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import logging
-import os
-import json
-from pprint import pformat
-import ast
-from shell import shellRunner
-from manifestGenerator import writeImports
-import AmbariConfig
-
-
-PUPPET_EXT=".pp"
-
-logger = logging.getLogger()
-
-class RepoInstaller:
-  def __init__(self, parsedJson, path, modulesdir, taskId, config):
-    self.parsedJson = parsedJson
-    self.path = path
-    self.modulesdir = modulesdir
-    self.taskId = taskId
-    self.sh = shellRunner()
-    self.config = config
-    
-  def prepareReposInfo(self):
-    params = {}
-    self.repoInfoList = []
-    if self.parsedJson.has_key('hostLevelParams'):
-      params = self.parsedJson['hostLevelParams']
-    if params.has_key('repo_info'):
-      self.repoInfoList = params['repo_info']
-    logger.info("Repo List Info " + pformat(self.repoInfoList))
-    if (isinstance(self.repoInfoList, basestring)):
-      if (self.repoInfoList is not None and (len(self.repoInfoList) > 0)):
-        self.repoInfoList = ast.literal_eval(self.repoInfoList)
-      else:
-        self.repoInfoList = []
-
-  def generateFiles(self):
-    repoPuppetFiles = []
-    for repo in self.repoInfoList:
-      repoFile = open(self.path + os.sep + repo['repoId'] + '-' + 
-                      str(self.taskId) + PUPPET_EXT, 'w+')
-
-      writeImports(repoFile, self.modulesdir, AmbariConfig.imports)
-      
-      baseUrl = ''
-      mirrorList = ''
-      
-      if repo.has_key('baseUrl'):
-        baseUrl = repo['baseUrl']
-        baseUrl = baseUrl.decode('unicode-escape').encode('utf-8')
-        # Hack to take care of $ signs in the repo url
-        baseUrl = baseUrl.replace('$', '\$')
-
-      if repo.has_key('mirrorsList'):
-        mirrorList = repo['mirrorsList']
-        mirrorList = mirrorList.decode('unicode-escape').encode('utf-8')
-        # Hack to take care of $ signs in the repo url
-        mirrorList = mirrorList.replace('$', '\$')
-
-      repoFile.write('node /default/ {')
-      repoFile.write('class{ "hdp-repos::process_repo" : ' + ' os_type => "' + repo['osType'] +
-      '", repo_id => "' + repo['repoId'] + '", base_url => "' + baseUrl +
-      '", mirror_list => "' + mirrorList +'", repo_name => "' + repo['repoName'] + '" }' )
-      repoFile.write('}')
-      repoFile.close()
-      repoPuppetFiles.append(repoFile.name)
-
-    return repoPuppetFiles
-
-  def generate_repo_manifests(self):
-    self.prepareReposInfo()
-    repoPuppetFiles = self.generateFiles()
-    return repoPuppetFiles
-
-def main():
-  #Test code
-  logging.basicConfig(level=logging.DEBUG)    
-  #test code
-  jsonFile = open('test.json', 'r')
-  jsonStr = jsonFile.read() 
-  parsedJson = json.loads(jsonStr)
-  repoInstaller = RepoInstaller(parsedJson, '/tmp', '/home/centos/ambari_ws/ambari-agent/src/main/puppet/modules',0)
-  repoInstaller.generate_repo_manifests()
-  
-if __name__ == '__main__':
-  main()
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py b/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py
deleted file mode 100644
index 66ce224..0000000
--- a/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py
+++ /dev/null
@@ -1,396 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import json
-import os.path
-import logging
-from datetime import datetime
-import pprint
-import AmbariConfig
-import hostname
-from ambari_agent import AgentException
-
-HOSTS_LIST_KEY = "all_hosts"
-PING_PORTS_KEY = "all_ping_ports"
-AMBARI_SERVER_HOST = "ambari_server_host"
-
-logger = logging.getLogger()
-
-non_global_configuration_types = ["hdfs-site", "core-site", 
-                             "mapred-queue-acls",
-                             "hadoop-policy", "mapred-site", 
-                             "capacity-scheduler", "hbase-site",
-                             "hbase-policy", "hive-site", "oozie-site", 
-                             "webhcat-site", "hdfs-exclude-file", "hue-site",
-                             "yarn-site"]
-
-# Converts from 1-3,5,6-8 to [1,2,3,5,6,7,8] 
-def convertRangeToList(list):
-  
-  resultList = []
-
-  for i in list:
-      
-    ranges = i.split(',')
-    
-    for r in ranges:
-      rangeBounds = r.split('-')
-      if len(rangeBounds) == 2:
-        
-        if not rangeBounds[0] or not rangeBounds[1]:
-          raise AgentException.AgentException("Broken data in given range, expected - ""m-n"" or ""m"", got : " + str(r))
-            
-        
-        resultList.extend(range(int(rangeBounds[0]), int(rangeBounds[1]) + 1))
-      elif len(rangeBounds) == 1:
-        resultList.append((int(rangeBounds[0])))
-      else:
-        raise AgentException.AgentException("Broken data in given range, expected - ""m-n"" or ""m"", got : " + str(r))
-    
-  return resultList
-
-#Converts from ['1:0-2,4', '42:3,5-7'] to [1,1,1,42,1,42,42,42]
-def convertMappedRangeToList(list):
-    
-  resultDict = {}
-  
-  for i in list:
-    valueToRanges = i.split(":")
-    if len(valueToRanges) <> 2:
-      raise AgentException.AgentException("Broken data in given value to range, expected format - ""value:m-n"", got - " + str(i))
-    value = valueToRanges[0]
-    rangesToken = valueToRanges[1]
-    
-    for r in rangesToken.split(','):
-        
-      rangeIndexes = r.split('-')
-    
-      if len(rangeIndexes) == 2:
-          
-        if not rangeIndexes[0] or not rangeIndexes[1]:
-          raise AgentException.AgentException("Broken data in given value to range, expected format - ""value:m-n"", got - " + str(r))
-
-        start = int(rangeIndexes[0])
-        end = int(rangeIndexes[1])
-        
-        for k in range(start, end + 1):
-          resultDict[k] = int(value)
-        
-        
-      elif len(rangeIndexes) == 1:
-        index = int(rangeIndexes[0])
-        
-        resultDict[index] = int(value)
-       
-
-  resultList = dict(sorted(resultDict.items())).values()
-      
-  return resultList
-
-def decompressClusterHostInfo(clusterHostInfo):
-  info = clusterHostInfo.copy()
-  #Pop info not related to host roles  
-  hostsList = info.pop(HOSTS_LIST_KEY)
-  pingPorts = info.pop(PING_PORTS_KEY)
-  ambariServerHost = info.pop(AMBARI_SERVER_HOST)
-
-  decompressedMap = {}
-
-  for k,v in info.items():
-    # Convert from 1-3,5,6-8 to [1,2,3,5,6,7,8] 
-    indexes = convertRangeToList(v)
-    # Convert from [1,2,3,5,6,7,8] to [host1,host2,host3...]
-    decompressedMap[k] = [hostsList[i] for i in indexes]
-  
-  #Convert from ['1:0-2,4', '42:3,5-7'] to [1,1,1,42,1,42,42,42]
-  pingPorts = convertMappedRangeToList(pingPorts)
-  
-  #Convert all elements to str
-  pingPorts = map(str, pingPorts)
-
-  #Add ping ports to result
-  decompressedMap[PING_PORTS_KEY] = pingPorts
-  #Add hosts list to result
-  decompressedMap[HOSTS_LIST_KEY] = hostsList
-  #Add ambari-server host to result
-  decompressedMap[AMBARI_SERVER_HOST] = ambariServerHost
-  
-  return decompressedMap
-
-
-#read static imports from file and write them to manifest
-def writeImports(outputFile, modulesdir, importsList):
-  logger.info("Modules dir is " + modulesdir)
-  outputFile.write('#' + datetime.now().strftime('%d.%m.%Y %H:%M:%S') + os.linesep)
-  for line in importsList:
-    modulename = line.rstrip()
-    line = "import '" + modulesdir + os.sep + modulename + "'" + os.linesep
-    outputFile.write(line)
-
-
-def generateManifest(parsedJson, fileName, modulesdir, ambariconfig):
-  logger.debug("JSON Received:")
-  logger.debug(json.dumps(parsedJson, sort_keys=True, indent=4))
-#reading json
-  hostname = parsedJson['hostname']
-  clusterHostInfo = {} 
-  if 'clusterHostInfo' in parsedJson:
-    if parsedJson['clusterHostInfo']:
-      clusterHostInfo = decompressClusterHostInfo(parsedJson['clusterHostInfo'])
-  params = {}
-  if 'hostLevelParams' in parsedJson: 
-    if parsedJson['hostLevelParams']:
-      params = parsedJson['hostLevelParams']
-  configurations = {}
-  if 'configurations' in parsedJson:
-    if parsedJson['configurations']:
-      configurations = parsedJson['configurations']
-  nonGlobalConfigurationsKeys = non_global_configuration_types
-  #hostAttributes = parsedJson['hostAttributes']
-  roleParams = {}
-  if 'roleParams' in parsedJson:
-    if parsedJson['roleParams']:
-      roleParams = parsedJson['roleParams']
-  roles = [{'role' : parsedJson['role'],
-            'cmd' : parsedJson['roleCommand'],
-            'roleParams' : roleParams}]
-  errMsg = ''
-  try:
-    #writing manifest
-    manifest = open(fileName, 'w')
-    #Change mode to make site.pp files readable to owner and group only
-    os.chmod(fileName, 0660)
-
-    #Check for Ambari Config and make sure you pick the right imports file
-
-    #writing imports from external static file
-    writeImports(outputFile=manifest, modulesdir=modulesdir, importsList=AmbariConfig.imports)
-
-    #writing hostname
-    writeHostnames(manifest)
-
-    #writing nodes
-    writeNodes(manifest, clusterHostInfo)
-
-    #writing params from map
-    writeParams(manifest, params, modulesdir)
-
-    nonGlobalConfigurations = {}
-    flatConfigurations = {}
-
-    if configurations:
-      for configKey in configurations.iterkeys():
-        if configKey in nonGlobalConfigurationsKeys:
-          nonGlobalConfigurations[configKey] = configurations[configKey]
-        else:
-          flatConfigurations[configKey] = configurations[configKey]
-
-    #writing config maps
-    if (nonGlobalConfigurations):
-      writeNonGlobalConfigurations(manifest, nonGlobalConfigurations)
-    if (flatConfigurations):
-      writeFlatConfigurations(manifest, flatConfigurations)
-
-    #writing host attributes
-    #writeHostAttributes(manifest, hostAttributes)
-
-    #writing task definitions
-    writeTasks(manifest, roles, ambariconfig, clusterHostInfo, hostname)
-
-
-  except TypeError:
-    errMsg = 'Manifest can\'t be generated from the JSON \n' + \
-                    json.dumps(parsedJson, sort_keys=True, indent=4)
-    logger.error(errMsg)
-  finally:
-    manifest.close()
-
-  return errMsg
-
-def writeHostnames(outputFile):
-  fqdn = hostname.hostname()
-  public_fqdn = hostname.public_hostname()
-  outputFile.write('$myhostname' + " = '" + fqdn + "'" + os.linesep)
-  outputFile.write('$public_hostname' + " = '" + public_fqdn + "'" + os.linesep)
-
-  #write nodes
-def writeNodes(outputFile, clusterHostInfo):
-  if clusterHostInfo.has_key('zookeeper_hosts'):
-    clusterHostInfo['zookeeper_hosts'] = sorted(clusterHostInfo['zookeeper_hosts'])
-  
-  for node in clusterHostInfo.iterkeys():
-    outputFile.write('$' + node + '= [')
-    coma = ''
-    
-    for value in clusterHostInfo[node]:
-      outputFile.write(coma + '\'' + value + '\'')
-      coma = ', '
-
-    outputFile.write(']\n')
-
-#write params
-def writeParams(outputFile, params, modulesdir):
-
-  for paramName in params.iterkeys():
-    if paramName == 'repo_info':     
-      continue
-      
-
-    param = params[paramName]
-    if type(param) is dict:
-
-      outputFile.write('$' + paramName + '= {\n')
-
-      coma = ''
-
-      for subParam in param.iterkeys():
-        outputFile.write(coma + '"' + subParam + '" => "' + param[subParam] + '"')
-        coma = ',\n'
-
-      outputFile.write('\n}\n')
-    else:
-      outputFile.write('$' +  paramName + '="' + param + '"\n')
-
-
-#write host attributes
-def writeHostAttributes(outputFile, hostAttributes):
-  outputFile.write('$hostAttributes={\n')
-
-  coma = ''
-  for attribute in hostAttributes.iterkeys():
-    outputFile.write(coma + '"' +  attribute + '" => "{' + hostAttributes[attribute] + '"}')
-    coma = ',\n'
-
-  outputFile.write('}\n')
-
-#write flat configurations
-def writeFlatConfigurations(outputFile, flatConfigs):
-  flatDict = {}
-  logger.debug("Generating global configurations =>\n" + pprint.pformat(flatConfigs))
-  for flatConfigName in flatConfigs.iterkeys():
-    for flatConfig in flatConfigs[flatConfigName].iterkeys():
-      flatDict[flatConfig] = flatConfigs[flatConfigName][flatConfig]
-  for gconfigKey in flatDict.iterkeys():
-    outputFile.write('$' + gconfigKey + " = '" + escape(flatDict[gconfigKey]) + "'" + os.linesep)
-
-#write xml configurations
-def writeNonGlobalConfigurations(outputFile, xmlConfigs):
-  outputFile.write('$configuration =  {\n')
-
-  for configName in xmlConfigs.iterkeys():
-    config = xmlConfigs[configName]
-    logger.debug("Generating " + configName + ", configurations =>\n" + pprint.pformat(config))
-    outputFile.write(configName + '=> {\n')
-    coma = ''
-    for configParam in config.iterkeys():
-      outputFile.write(coma + '"' + configParam + '" => \'' + escape(config[configParam]) + '\'')
-      coma = ',\n'
-
-    outputFile.write('\n},\n')
-    
-  outputFile.write('\n}\n')
-
-#write node tasks
-def writeTasks(outputFile, roles, ambariconfig, clusterHostInfo=None, 
-               hostname="localhost"):
-  #reading dictionaries
-  rolesToClass = AmbariConfig.rolesToClass
-
-  serviceStates = AmbariConfig.serviceStates
-
-  outputFile.write('node /default/ {\n ')
-
-  writeStages(outputFile, len(roles) + 1)
-  stageNum = 1
-
-  outputFile.write('class {\'hdp\': stage => ' + str(stageNum) + '}\n')
-  stageNum = stageNum + 1
-  # Need to hack for zookeeper since we need 
-  zk_hosts = []
-  for role in roles :
-    rolename = role['role']
-    command = role['cmd']
-    taskParams = role['roleParams']
-    if (rolename == 'ZOOKEEPER_SERVER'):
-      zk_hosts = clusterHostInfo['zookeeper_hosts']
-      # Sort the list in lexicographical order
-      taskParams['myid'] = str(sorted(zk_hosts).index(hostname) + 1)
-    
-    taskParamsNormalized = normalizeTaskParams(taskParams)
-    taskParamsPostfix = ''
-    
-    if len(taskParamsNormalized) > 0 :
-      taskParamsPostfix = ', ' + taskParamsNormalized
-    
-    className = rolesToClass[rolename]
-   
-    if command in serviceStates:
-      serviceState = serviceStates[command] 
-      outputFile.write('class {\'' + className + '\':' +
-                        ' stage => ' + str(stageNum) + 
-                     ', service_state => ' + serviceState 
-                     + taskParamsPostfix + '}\n')
-    else:
-      outputFile.write('class {\'' + className + '\':' + 
-                       ' stage => ' + str(stageNum) + 
-                       taskParamsPostfix + '}\n')
-
-    stageNum = stageNum + 1
-  outputFile.write('}\n')
-
-def normalizeTaskParams(taskParams):
-  result = ''
-  coma = ''
-  
-  for paramName in taskParams.iterkeys():
-    result = coma + result + paramName + ' => ' + taskParams[paramName]
-    coma = ','
-    
-  return result
-  
-def writeStages(outputFile, numStages):
-  arrow = ''
-  
-  for i in range(numStages):
-    outputFile.write(arrow + 'stage{' + str(i + 1) + ' :}')
-    arrow = ' -> '
-  
-  outputFile.write('\n')
-
-#Escape special characters
-def escape(param):
-    return param.replace('\\', '\\\\').replace('\'', '\\\'')
-  
-def main():
-  logging.basicConfig(level=logging.DEBUG)    
-  #test code
-  jsonFile = open('test.json', 'r')
-  jsonStr = jsonFile.read() 
-  modulesdir = os.path.abspath(os.getcwd() + ".." + os.sep + ".." + 
-                               os.sep + ".." + os.sep + "puppet" + 
-                               os.sep + "modules" + os.sep)
-  inputJsonStr = jsonStr
-  parsedJson = json.loads(inputJsonStr)
-  generateManifest(parsedJson, 'site.pp', modulesdir)
-  
-
-if __name__ == '__main__':
-  main()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/python/ambari_agent/site.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/site.pp b/ambari-agent/src/main/python/ambari_agent/site.pp
deleted file mode 100644
index a5badea..0000000
--- a/ambari-agent/src/main/python/ambari_agent/site.pp
+++ /dev/null
@@ -1,92 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/*.pp'
-$NAMENODE= ['h2.hortonworks.com']
-$DATANODE= ['h1.hortonworks.com', 'h2.hortonworks.com']
-$jdk_location="http://hdp1/downloads"
-$jdk_bins= {
-"32" => "jdk-6u31-linux-x64.bin",
-"64" => "jdk-6u31-linux-x64.bin"
-}
-$hdfs_user="hdfs"
-$java32_home="/usr/jdk64/jdk1.6.0_31"
-$java64_home="/usr/jdk64/jdk1.6.0_31"
-$configuration =  {
-capacity-scheduler=> {
-"mapred.capacity-scheduler.queue.default.capacity" => "100",
-"mapred.capacity-scheduler.queue.default.supports-priorit" => "false"
-},
-oozie-site=> {
-"oozie.service.ActionService.executor.ext.classes" => "org.apache.oozie.action.hadoop.HiveActionExecutor, org.apache.oozie.action.hadoop.SqoopActionExecutor,org.apache.oozie.action.email.EmailActionExecutor,"
-},
-mapred-site=> {
-"mapred.queue.names" => "hive,pig,default",
-"mapred.jobtracker.taskScheduler" => "org.apache.hadoop.mapred.CapacityTaskScheduler"
-},
-core-site=> {
-"fs.default.name" => "hrt8n36.cc1.ygridcore.net"
-},
-hbase-policy=> {
-"security.client.protocol.acl" => "*"
-},
-hbase-site=> {
-"hbase.cluster.distributed" => "true"
-},
-hdfs-site=> {
-"dfs.block.size" => "256000000",
-"dfs.replication" => "1"
-},
-hadoop-policy=> {
-"security.client.datanode.protocol.acl" => "*",
-"security.client.protocol.acl" => "*"
-},
-mapred-queue-acls=> {
-"mapred.queue.default.acl-submit-job" => "*",
-"mapred.queue.default.acl-administer-jobs" => "*"
-},
-templeton-site=> {
-"templeton.override.enabled" => "true"
-},
-hive-site=> {
-"hive.exec.scratchdir" => "/tmp"
-},
-
-}
-$security_enabled = "true"
-$task_bin_exe = "ls"
-$hadoop_piddirprefix = "/tmp"
-$ganglia_server_host = "localhost"
-node /default/ {
- stage{1 :} -> stage{2 :}
-class {'hdp': stage => 1}
-class {'hdp-hadoop::namenode': stage => 2, service_state => installed_and_configured}
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py b/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
index b6b79b1..d020ad9 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
@@ -21,7 +21,6 @@ from Queue import Queue
 
 from unittest import TestCase
 from ambari_agent.LiveStatus import LiveStatus
-from ambari_agent.PuppetExecutor import PuppetExecutor
 from ambari_agent.ActionQueue import ActionQueue
 from ambari_agent.AmbariConfig import AmbariConfig
 import os, errno, time, pprint, tempfile, threading
@@ -243,23 +242,23 @@ class TestActionQueue(TestCase):
     dummy_controller = MagicMock()
     actionQueue = ActionQueue(config, dummy_controller)
     unfreeze_flag = threading.Event()
-    puppet_execution_result_dict = {
+    python_execution_result_dict = {
       'stdout': 'out',
       'stderr': 'stderr',
       'structuredOut' : ''
       }
     def side_effect(command, tmpoutfile, tmperrfile):
       unfreeze_flag.wait()
-      return puppet_execution_result_dict
+      return python_execution_result_dict
     def patched_aq_execute_command(command):
       # We have to perform patching for separate thread in the same thread
-      with patch.object(PuppetExecutor, "runCommand") as runCommand_mock:
+      with patch.object(CustomServiceOrchestrator, "runCommand") as runCommand_mock:
           runCommand_mock.side_effect = side_effect
           actionQueue.execute_command(command)
     ### Test install/start/stop command ###
     ## Test successful execution with configuration tags
-    puppet_execution_result_dict['status'] = 'COMPLETE'
-    puppet_execution_result_dict['exitcode'] = 0
+    python_execution_result_dict['status'] = 'COMPLETE'
+    python_execution_result_dict['exitcode'] = 0
     # We call method in a separate thread
     execution_thread = Thread(target = patched_aq_execute_command ,
                               args = (self.datanode_install_command, ))
@@ -315,8 +314,8 @@ class TestActionQueue(TestCase):
     self.assertEqual(len(report['reports']), 0)
 
     ## Test failed execution
-    puppet_execution_result_dict['status'] = 'FAILED'
-    puppet_execution_result_dict['exitcode'] = 13
+    python_execution_result_dict['status'] = 'FAILED'
+    python_execution_result_dict['exitcode'] = 13
     # We call method in a separate thread
     execution_thread = Thread(target = patched_aq_execute_command ,
                               args = (self.datanode_install_command, ))
@@ -349,8 +348,8 @@ class TestActionQueue(TestCase):
     self.assertEqual(len(report['reports']), 0)
 
     ### Test upgrade command ###
-    puppet_execution_result_dict['status'] = 'COMPLETE'
-    puppet_execution_result_dict['exitcode'] = 0
+    python_execution_result_dict['status'] = 'COMPLETE'
+    python_execution_result_dict['exitcode'] = 0
     execution_thread = Thread(target = patched_aq_execute_command ,
                               args = (self.datanode_upgrade_command, ))
     execution_thread.start()
@@ -384,12 +383,9 @@ class TestActionQueue(TestCase):
   @patch.object(CustomServiceOrchestrator, "runCommand")
   @patch("CommandStatusDict.CommandStatusDict")
   @patch.object(ActionQueue, "status_update_callback")
-  @patch.object(ActionQueue, "determine_command_format_version")
-  def test_store_configuration_tags(self, determine_command_format_version_mock,
-                                    status_update_callback_mock,
+  def test_store_configuration_tags(self, status_update_callback_mock,
                                     command_status_dict_mock,
                                     cso_runCommand_mock):
-    determine_command_format_version_mock.return_value = 2
     custom_service_orchestrator_execution_result_dict = {
       'stdout': 'out',
       'stderr': 'stderr',
@@ -426,7 +422,6 @@ class TestActionQueue(TestCase):
     self.assertEqual(expected, report['reports'][0])
 
   @patch.object(ActionQueue, "status_update_callback")
-  @patch.object(ActionQueue, "determine_command_format_version")
   @patch.object(StackVersionsFileHandler, "read_stack_version")
   @patch.object(CustomServiceOrchestrator, "requestComponentStatus")
   @patch.object(ActionQueue, "execute_command")
@@ -435,93 +430,18 @@ class TestActionQueue(TestCase):
   def test_execute_status_command(self, CustomServiceOrchestrator_mock,
                                   build_mock, execute_command_mock,
                                   requestComponentStatus_mock, read_stack_version_mock,
-                                  determine_command_format_version_mock,
                                   status_update_callback):
     CustomServiceOrchestrator_mock.return_value = None
     dummy_controller = MagicMock()
     actionQueue = ActionQueue(AmbariConfig().getConfig(), dummy_controller)
 
     build_mock.return_value = "dummy report"
-    # Check execution ov V1 status command
-    determine_command_format_version_mock.return_value = ActionQueue.COMMAND_FORMAT_V1
-    actionQueue.execute_status_command(self.status_command)
-    report = actionQueue.result()
-    expected = 'dummy report'
-    self.assertEqual(len(report['componentStatus']), 1)
-    self.assertEqual(report['componentStatus'][0], expected)
-    self.assertFalse(requestComponentStatus_mock.called)
 
-    # Check execution ov V2 status command
     requestComponentStatus_mock.reset_mock()
     requestComponentStatus_mock.return_value = {'exitcode': 0}
-    determine_command_format_version_mock.return_value = ActionQueue.COMMAND_FORMAT_V2
     actionQueue.execute_status_command(self.status_command)
     report = actionQueue.result()
     expected = 'dummy report'
     self.assertEqual(len(report['componentStatus']), 1)
     self.assertEqual(report['componentStatus'][0], expected)
     self.assertTrue(requestComponentStatus_mock.called)
-
-
-  @patch.object(CustomServiceOrchestrator, "__init__")
-  def test_determine_command_format_version(self,
-                                            CustomServiceOrchestrator_mock):
-    CustomServiceOrchestrator_mock.return_value = None
-    v1_command = {
-      'commandParams': {
-        'schema_version': '1.0'
-      }
-    }
-    v2_command = {
-      'commandParams': {
-        'schema_version': '2.0'
-      }
-    }
-    current_command = {
-      # Absent 'commandParams' section
-    }
-    dummy_controller = MagicMock()
-    actionQueue = ActionQueue(AmbariConfig().getConfig(), dummy_controller)
-    self.assertEqual(actionQueue.determine_command_format_version(v1_command),
-                     ActionQueue.COMMAND_FORMAT_V1)
-    self.assertEqual(actionQueue.determine_command_format_version(v2_command),
-                     ActionQueue.COMMAND_FORMAT_V2)
-    self.assertEqual(actionQueue.determine_command_format_version(current_command),
-                     ActionQueue.COMMAND_FORMAT_V1)
-
-
-  @patch.object(ActionQueue, "determine_command_format_version")
-  @patch("__builtin__.open")
-  @patch.object(PuppetExecutor, "runCommand")
-  @patch.object(CustomServiceOrchestrator, "runCommand")
-  @patch.object(ActionQueue, "status_update_callback")
-  @patch.object(CustomServiceOrchestrator, "__init__")
-  def test_command_execution_depending_on_command_format(self,
-                                CustomServiceOrchestrator_mock,
-                                status_update_callback_mock,
-                                custom_ex_runCommand_mock,
-                                puppet_runCommand_mock, open_mock,
-                                determine_command_format_version_mock):
-    CustomServiceOrchestrator_mock.return_value = None
-    dummy_controller = MagicMock()
-    actionQueue = ActionQueue(AmbariConfig().getConfig(), dummy_controller)
-    ret = {
-      'stdout' : '',
-      'stderr' : '',
-      'exitcode': 1,
-      }
-    puppet_runCommand_mock.return_value = ret
-    determine_command_format_version_mock.return_value = \
-                                  ActionQueue.COMMAND_FORMAT_V1
-    actionQueue.execute_command(self.datanode_install_command)
-    self.assertTrue(puppet_runCommand_mock.called)
-    self.assertFalse(custom_ex_runCommand_mock.called)
-
-    puppet_runCommand_mock.reset_mock()
-
-    custom_ex_runCommand_mock.return_value = ret
-    determine_command_format_version_mock.return_value = \
-      ActionQueue.COMMAND_FORMAT_V2
-    actionQueue.execute_command(self.datanode_install_command)
-    self.assertFalse(puppet_runCommand_mock.called)
-    self.assertTrue(custom_ex_runCommand_mock.called)

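The pattern used above, patching runCommand inside the worker thread and
driving it through a side_effect function, generalizes. A minimal sketch
with an invented Worker class (mock 1.x style, as in these tests):

  from mock import patch

  class Worker(object):
    def run(self, command):
      raise NotImplementedError()  # stand-in for the real executor

  def fake_run(command):
    # Deterministic result shaped like python_execution_result_dict above
    return {'stdout': 'out', 'stderr': 'stderr', 'exitcode': 0}

  with patch.object(Worker, 'run') as run_mock:
    run_mock.side_effect = fake_run
    print Worker().run({'taskId': 1})  # -> {'stdout': 'out', ...}
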
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
index d972928..ed74232 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
@@ -37,7 +37,6 @@ import sys
 from AgentException import AgentException
 from FileCache import FileCache
 from LiveStatus import LiveStatus
-import manifestGenerator
 
 
 class TestCustomServiceOrchestrator(TestCase):
@@ -67,7 +66,7 @@ class TestCustomServiceOrchestrator(TestCase):
     self.assertTrue(dummy_controller.registration_listeners.append.called)
 
 
-  @patch.object(manifestGenerator, 'decompressClusterHostInfo')
+  @patch.object(CustomServiceOrchestrator, 'decompressClusterHostInfo')
   @patch("hostname.public_hostname")
   @patch("os.path.isfile")
   @patch("os.unlink")
@@ -217,7 +216,7 @@ class TestCustomServiceOrchestrator(TestCase):
     run_file_mock.reset_mock()
 
     # unknown script type case
-    command['commandParams']['script_type'] = "PUPPET"
+    command['commandParams']['script_type'] = "SOME_TYPE"
     ret = orchestrator.runCommand(command, "out.txt", "err.txt")
     self.assertEqual(ret['exitcode'], 1)
     self.assertFalse(run_file_mock.called)

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/test/python/ambari_agent/TestGrep.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestGrep.py b/ambari-agent/src/test/python/ambari_agent/TestGrep.py
index f379960..206e6e8 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestGrep.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestGrep.py
@@ -95,13 +95,6 @@ debug: Processing report from ambari-dmi with processor Puppet::Reports::Store
     desired = ''
     self.assertEquals(fragment, desired, 'Grep tail function contains bug in index arithmetics')
 
-  def test_filterMarkup(self):
-    string = """notice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Process_pkg[hadoop 64]/Package[hadoop-libhdfs]/ensure: created"""
-    desired="""notice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Process_pkg[hadoop 64]/Package[hadoop-libhdfs]/ensure: created"""
-    filtered = self.grep.filterMarkup(string)
-    #sys.stderr.write(filtered)
-    self.assertEquals(filtered, desired)
-
   def tearDown(self):
     pass
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/test/python/ambari_agent/TestManifestGenerator.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestManifestGenerator.py b/ambari-agent/src/test/python/ambari_agent/TestManifestGenerator.py
deleted file mode 100644
index b3163e2..0000000
--- a/ambari-agent/src/test/python/ambari_agent/TestManifestGenerator.py
+++ /dev/null
@@ -1,230 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import os, sys, StringIO
-from ambari_agent.AgentException import AgentException
-
-from unittest import TestCase
-from ambari_agent import manifestGenerator
-import ambari_agent.AmbariConfig
-import tempfile
-import json
-import shutil
-from ambari_agent.AmbariConfig import AmbariConfig
-from mock.mock import patch, MagicMock, call
-
-
-class TestManifestGenerator(TestCase):
-
-  def setUp(self):
-    # disable stdout
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    self.dir = tempfile.mkdtemp()
-    self.config = AmbariConfig()
-    jsonCommand = file('../../main/python/ambari_agent/test.json').read()
-    self.parsedJson = json.loads(jsonCommand)
-
-
-  def tearDown(self):
-    shutil.rmtree(self.dir)
-
-    # enable stdout
-    sys.stdout = sys.__stdout__
-
-
-  def testWriteImports(self):
-    tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1]
-    print tmpFileName
-    tmpFile = file(tmpFileName, 'r+')
-
-    manifestGenerator.writeImports(tmpFile, '../../main/puppet/modules', self.config.getImports())
-    tmpFile.seek(0)
-    print tmpFile.read()
-    tmpFile.close()
-
-  def testEscape(self):
-    shouldBe = '\\\'\\\\'
-    result = manifestGenerator.escape('\'\\')
-    self.assertEqual(result, shouldBe)
-
-
-  def test_writeNodes(self):
-    tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1]
-    tmpFile = file(tmpFileName, 'r+')
-
-    clusterHostInfo = self.parsedJson['clusterHostInfo']
-    clusterHostInfo['zookeeper_hosts'] = ["h1.hortonworks.com", "h2.hortonworks.com"]
-    manifestGenerator.writeNodes(tmpFile, clusterHostInfo)
-    tmpFile.seek(0)
-    print tmpFile.read()
-    tmpFile.close()
-    os.remove(tmpFileName)
-
-  def test_writeNodes_failed(self):
-    tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1]
-    tmpFile = file(tmpFileName, 'r+')
-
-    clusterHostInfo = self.parsedJson['clusterHostInfo']
-    clusterHostInfo.update({u'ZOOKEEPER':[None]})
-    clusterHostInfo['zookeeper_hosts'] = ["h1.hortonworks.com", "h2.hortonworks.com"]
-    self.assertRaises(TypeError, manifestGenerator.writeNodes, tmpFile, clusterHostInfo)
-    tmpFile.seek(0)
-    print tmpFile.read()
-    tmpFile.close()
-    os.remove(tmpFileName)
-
-  def test_writeHostAttributes(self):
-    tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1]
-    tmpFile = file(tmpFileName, 'r+')
-
-    hostAttributes = {'HostAttr1' : '1', 'HostAttr2' : '2'}
-    manifestGenerator.writeHostAttributes(tmpFile, hostAttributes)
-    tmpFile.seek(0)
-    print tmpFile.read()
-    tmpFile.close()
-    os.remove(tmpFileName)
-
-
-  def test_writeTasks(self):
-    tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1]
-    tmpFile = file(tmpFileName, 'r+')
-    roles = [{'role' : 'ZOOKEEPER_SERVER',
-              'cmd' : 'NONE',
-              'roleParams' : {'someRoleParams': '-x'}}]
-    clusterHostInfo = self.parsedJson['clusterHostInfo']
-    clusterHostInfo['zookeeper_hosts'] = ["h1.hortonworks.com", "h2.hortonworks.com"]
-    manifestGenerator.writeTasks(tmpFile, roles, self.config, clusterHostInfo, "h1.hortonworks.com")
-    tmpFile.seek(0)
-    print tmpFile.read()
-    tmpFile.close()
-    os.remove(tmpFileName)
-    
-  def testConvertRangeToList(self):
-    
-    rangesList = ["1-3", "4", "6", "7-9"]
-    list = manifestGenerator.convertRangeToList(rangesList)
-    self.assertEqual(sorted(list), sorted([1,2,3,4,6,7,8,9]))
-    
-    rangesList = ["5", "4"]
-    list = manifestGenerator.convertRangeToList(rangesList)
-    self.assertEqual(list, [5,4])
-
-    exceptionWasThrown = False
-    try:
-      rangesList = ["0", "-2"]
-      list = manifestGenerator.convertRangeToList(rangesList)
-    except AgentException, err:
-      #Expected
-      exceptionWasThrown = True
-
-    self.assertTrue(exceptionWasThrown)
-
-    exceptionWasThrown = False
-    try:
-      rangesList = ["0", "-"]
-      list = manifestGenerator.convertRangeToList(rangesList)
-    except AgentException, err:
-      #Expected
-      exceptionWasThrown = True
-    self.assertTrue(exceptionWasThrown)
-
-    exceptionWasThrown = False
-    try:
-      rangesList = ["0", "2-"]
-      list = manifestGenerator.convertRangeToList(rangesList)
-    except AgentException, err:
-      #Expected
-      exceptionWasThrown = True
-    self.assertTrue(exceptionWasThrown)
-    
-  def testConvertMappedRangeToList(self):
-    mappedRangedList = ["1:0-2,5", "2:3,4"]
-    list = manifestGenerator.convertMappedRangeToList(mappedRangedList)
-    self.assertEqual(list, [1,1,1,2,2,1])
-    
-    mappedRangedList = ["7:0"]
-    list = manifestGenerator.convertMappedRangeToList(mappedRangedList)
-    self.assertEqual(list, [7])
-    
-    exceptionWasThrown = False
-    mappedRangedList = ["7:0-"]
-    try:
-      list = manifestGenerator.convertMappedRangeToList(mappedRangedList)
-    except AgentException, err:
-      #Expected
-      exceptionWasThrown = True
-    self.assertTrue(exceptionWasThrown)
-
-
-    exceptionWasThrown = False
-    mappedRangedList = ["7:-"]
-    try:
-      list = manifestGenerator.convertMappedRangeToList(mappedRangedList)
-    except AgentException, err:
-      #Expected
-      exceptionWasThrown = True
-    self.assertTrue(exceptionWasThrown)
-
-    exceptionWasThrown = False
-    mappedRangedList = ["7:-1"]
-    try:
-      list = manifestGenerator.convertMappedRangeToList(mappedRangedList)
-    except AgentException, err:
-      #Expected
-      exceptionWasThrown = True
-    self.assertTrue(exceptionWasThrown)
-    
-  def testDecompressClusterHostInfo(self):
-
-    all_hosts_key = "all_hosts"
-    all_ping_ports_key = "all_ping_ports"
-    ambari_server_host_key = "ambari_server_host"
-    info = { "jtnode_host"        : ["5"],
-             "hbase_master_hosts" : ["5"],
-             all_hosts_key: ["h8", "h9", "h5", "h4", "h7", "h6", "h1", "h3", "h2", "h10"],
-             "namenode_host"      : ["6"],
-             "mapred_tt_hosts"    : ["0", "7-9", "2","3", "5"],
-             "slave_hosts"        : ["3", "0", "1", "5-9"],
-             "snamenode_host"     : ["8"],
-             all_ping_ports_key: ["8670:1,5-8", "8673:9", "8672:0,4", "8671:2,3"],
-             "hbase_rs_hosts"     : ["3", "1", "5", "8", "9"],
-             ambari_server_host_key: ["h0"]
-    }
-
-    decompressedInfo = manifestGenerator.decompressClusterHostInfo(info)
-
-    self.assertTrue(decompressedInfo.has_key(all_hosts_key))
-
-    self.assertEquals(info.pop(all_hosts_key), decompressedInfo.get(all_hosts_key))
-
-    self.assertEquals(['8672', '8670', '8671', '8671', '8672',
-                       '8670', '8670', '8670', '8670', '8673'],
-                      decompressedInfo.get(all_ping_ports_key))
-
-    self.assertEquals(["h0"], decompressedInfo.get(ambari_server_host_key))
-
-    for k,v in decompressedInfo.items():
-      if k is all_ping_ports_key:
-        continue # Skip checking this list
-      # Check that list contains only host names
-      non_converted = [x for x in v if not x.startswith("h")]
-      self.assertEquals(0, len(non_converted))
-

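The deleted tests above document Ambari's compressed clusterHostInfo encoding: plain ranges such as "1-3" expand to lists of host indexes, and mapped ranges such as "8670:1,5-8" assign one value (here a ping port) to a set of indexes into all_hosts. A minimal Python sketch of that decoding, reconstructed only from the behaviour the assertions describe (the function names are illustrative, not the agent's API; the real code raised AgentException where this raises ValueError, and may coerce types per field):

def convert_range_to_list(ranges):
    # ["1-3", "4"] -> [1, 2, 3, 4]; malformed ranges raise ValueError
    result = []
    for item in ranges:
        if '-' in item:
            low, _, high = item.partition('-')
            if not low or not high:
                raise ValueError('malformed range: %s' % item)
            result.extend(range(int(low), int(high) + 1))
        else:
            result.append(int(item))
    return result

def convert_mapped_range_to_list(mapped_ranges):
    # ["1:0-2,5", "2:3,4"] -> ['1', '1', '1', '2', '2', '1'] (value per index, in index order)
    index_to_value = {}
    for item in mapped_ranges:
        value, _, indexes = item.partition(':')
        for idx in convert_range_to_list(indexes.split(',')):
            index_to_value[idx] = value
    return [index_to_value[i] for i in sorted(index_to_value)]
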
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/test/python/ambari_agent/TestPuppetExecutor.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestPuppetExecutor.py b/ambari-agent/src/test/python/ambari_agent/TestPuppetExecutor.py
deleted file mode 100644
index ee7f9d8..0000000
--- a/ambari-agent/src/test/python/ambari_agent/TestPuppetExecutor.py
+++ /dev/null
@@ -1,275 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from PuppetExecutor import PuppetExecutor
-from RepoInstaller import RepoInstaller
-from Grep import Grep
-from pprint import pformat
-import socket, threading, tempfile
-import os, time
-import sys
-import json
-from AmbariConfig import AmbariConfig
-from mock.mock import patch, MagicMock, call
-from threading import Thread
-from shell import shellRunner
-import manifestGenerator
-
-class TestPuppetExecutor(TestCase):
-
-
-  def test_build(self):
-    puppetexecutor = PuppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
-    command = puppetexecutor.puppetCommand("site.pp")
-    self.assertEquals("puppet", command[0], "puppet binary wrong")
-    self.assertEquals("apply", command[1], "local apply called")
-    self.assertEquals("--confdir=/tmp", command[2],"conf dir tmp")
-    self.assertEquals("--detailed-exitcodes", command[3], "make sure output correct")
-    
-  @patch.object(shellRunner,'run')
-  def test_isJavaAvailable(self, cmdrun_mock):
-    puppetInstance = PuppetExecutor("/tmp", "/x", "/y", '/tmpdir',
-                                    AmbariConfig().getConfig())
-    command = {'configurations':{'global':{'java64_home':'/usr/jdk/jdk123'}}}
-    
-    cmdrun_mock.return_value = {'exitCode': 1, 'output': 'Command not found', 'error': ''}
-    self.assertEquals(puppetInstance.isJavaAvailable(command), False)
-    
-    cmdrun_mock.return_value = {'exitCode': 0, 'output': 'OK', 'error': ''}
-    self.assertEquals(puppetInstance.isJavaAvailable(command), True)
-
-  @patch.object(manifestGenerator, 'generateManifest')
-  @patch.object(PuppetExecutor, 'isJavaAvailable')
-  @patch.object(PuppetExecutor, 'runPuppetFile')
-  def test_run_command(self, runPuppetFileMock, isJavaAvailableMock, generateManifestMock):
-    tmpdir = tempfile.gettempdir()
-    puppetInstance = PuppetExecutor("/tmp", "/x", "/y", tmpdir, AmbariConfig().getConfig())
-    jsonFile = open('../../main/python/ambari_agent/test.json', 'r')
-    jsonStr = jsonFile.read()
-    parsedJson = json.loads(jsonStr)
-    parsedJson["taskId"] = 1
-    def side_effect1(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile, timeout):
-        result["exitcode"] = 0
-    runPuppetFileMock.side_effect = side_effect1
-    generateManifestMock.return_value = ''
-    puppetInstance.reposInstalled = False
-    isJavaAvailableMock.return_value = True
-    res = puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
-    self.assertEquals(res["exitcode"], 0)
-    self.assertTrue(puppetInstance.reposInstalled)
-
-    def side_effect2(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile, timeout):
-        result["exitcode"] = 999
-    runPuppetFileMock.side_effect = side_effect2
-    puppetInstance.reposInstalled = False
-    isJavaAvailableMock.return_value = True
-    res = puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
-    self.assertEquals(res["exitcode"], 999)
-    self.assertFalse(puppetInstance.reposInstalled)
-
-    generateManifestMock.return_value = 'error during manifest generation'
-    res = puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
-    self.assertTrue(generateManifestMock.called)
-    self.assertEquals(res["exitcode"], 1)
-    generateManifestMock.return_value = ''
-
-    def side_effect2(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile):
-        result["exitcode"] = 0
-    runPuppetFileMock.side_effect = side_effect2
-    puppetInstance.reposInstalled = False
-    isJavaAvailableMock.return_value = False
-    parsedJson['roleCommand'] = "START"
-    parsedJson['hostLevelParams'] = {'java_home':'/usr/jdk/jdk123'}
-    res = puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
-    
-    JAVANOTVALID_MSG = "Cannot access JDK! Make sure you have permission to execute {0}/bin/java"
-    errMsg = JAVANOTVALID_MSG.format('/usr/jdk/jdk123')
-    self.assertEquals(res["exitcode"], 1)
-    self.assertEquals(res["stderr"], errMsg)
-    self.assertFalse(puppetInstance.reposInstalled)
-
-    parsedJson['hostLevelParams'] = {'random':{'name1':'value2'}}
-    res = puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
-    self.assertEquals(res["exitcode"], 1)
-    self.assertEquals(res["stderr"], "Cannot access JDK! Make sure java_home is specified in hostLevelParams")
-
-  @patch("os.path.exists")
-  def test_configure_environ(self, osPathExistsMock):
-    config = AmbariConfig().getConfig()
-    tmpdir = tempfile.gettempdir()
-    puppetInstance = PuppetExecutor("/tmp", "/x", "/y", tmpdir, config)
-    environ = puppetInstance.configureEnviron({})
-    self.assertEquals(environ, {})
-
-    config.set('puppet','ruby_home',"test/ruby_home")
-    puppetInstance = PuppetExecutor("/tmp", "/x", "/y", tmpdir, config)
-    osPathExistsMock.return_value = True
-    environ = puppetInstance.configureEnviron({"PATH" : "test_path"})
-    self.assertEquals(environ["PATH"], "test/ruby_home/bin:test_path")
-    self.assertEquals(environ["MY_RUBY_HOME"], "test/ruby_home")
-
-  def test_condense_bad2(self):
-    puppetexecutor = PuppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
-    grep = Grep()
-    puppetexecutor.grep = grep
-    grep.ERROR_LAST_LINES_BEFORE = 2
-    grep.ERROR_LAST_LINES_AFTER = 3
-    string_err = open('ambari_agent' + os.sep + 'dummy_puppet_output_error2.txt', 'r').read().replace("\n", os.linesep)
-    result = puppetexecutor.condenseOutput(string_err, '', 1)
-    stripped_string = string_err.strip()
-    lines = stripped_string.splitlines(True)
-    d = lines[1:6]
-    d = grep.cleanByTemplate("".join(d).strip(), "warning").splitlines(True)
-    result_check = True
-    for l in d:
-      result_check &= grep.filterMarkup(l) in result
-    self.assertEquals(result_check, True, "Failed to condense fail log")
-    self.assertEquals(('warning' in result.lower()), False, "Failed to condense fail log")
-    self.assertEquals(len(result.splitlines(True)), 5, "Failed to condense fail log")
-
-  def test_condense_bad3(self):
-    puppetexecutor = PuppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
-    grep = Grep()
-    puppetexecutor.grep = grep
-    string_err = open('ambari_agent' + os.sep + 'dummy_puppet_output_error3.txt', 'r').read().replace("\n", os.linesep)
-    result = puppetexecutor.condenseOutput(string_err, '', 1)
-    stripped_string = string_err.strip()
-    lines = stripped_string.splitlines(True)
-    #sys.stderr.write(result)
-    d = lines[0:31]
-    d = grep.cleanByTemplate("".join(d).strip(), "warning").splitlines(True)
-    result_check = True
-    for l in d:
-      result_check &= grep.filterMarkup(l) in result
-    self.assertEquals(result_check, True, "Failed to condense fail log")
-    self.assertEquals(('warning' in result.lower()), False, "Failed to condense fail log")
-    self.assertEquals(len(result.splitlines(True)), 19, "Failed to condense fail log")
-
-  def test_condense_good(self):
-    puppetexecutor = PuppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
-    grep = Grep()
-    puppetexecutor.grep = grep
-    grep.OUTPUT_LAST_LINES = 2
-    string_good = open('ambari_agent' + os.sep + 'dummy_puppet_output_good.txt', 'r').read().replace("\n", os.linesep)
-    result = puppetexecutor.condenseOutput(string_good, PuppetExecutor.NO_ERROR, 0)
-    stripped_string = string_good.strip()
-    lines = stripped_string.splitlines(True)
-    result_check = lines[45].strip() in result and lines[46].strip() in result
-    self.assertEquals(result_check, True, "Failed to condense output log")
-    self.assertEquals(len(result.splitlines(True)), 2, "Failed to condense output log")
-
-  @patch("shell.kill_process_with_children")
-  def test_watchdog_1(self, kill_process_with_children_mock):
-    """
-    Tests whether watchdog works
-    """
-    subproc_mock = self.Subprocess_mockup()
-    config = AmbariConfig().getConfig()
-    executor_mock = self.PuppetExecutor_mock("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
-      "/usr/",
-      "/root/workspace/puppet-install/facter-1.6.10/",
-      "/tmp", config, subproc_mock)
-    _, tmpoutfile = tempfile.mkstemp()
-    _, tmperrfile = tempfile.mkstemp()
-    result = {  }
-    puppetEnv = { "RUBYLIB" : ""}
-    kill_process_with_children_mock.side_effect = lambda pid : subproc_mock.terminate()
-    subproc_mock.returncode = None
-    timeout = "0.1"
-    thread = Thread(target =  executor_mock.runPuppetFile, args = ("fake_puppetFile",
-        result, puppetEnv, tmpoutfile, tmperrfile, timeout))
-    thread.start()
-    time.sleep(0.1)
-    subproc_mock.finished_event.wait()
-    self.assertEquals(subproc_mock.was_terminated, True, "Subprocess should be terminated due to timeout")
-
-
-  def test_watchdog_2(self):
-    """
-    Tries to catch false positive watchdog invocations
-    """
-    subproc_mock = self.Subprocess_mockup()
-    config = AmbariConfig().getConfig()
-    executor_mock = self.PuppetExecutor_mock("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
-    "/usr/",
-    "/root/workspace/puppet-install/facter-1.6.10/",
-    "/tmp", config, subproc_mock)
-    _, tmpoutfile = tempfile.mkstemp()
-    _, tmperrfile = tempfile.mkstemp()
-    result = {  }
-    puppetEnv = { "RUBYLIB" : ""}
-    subproc_mock.returncode = 0
-    timeout = "5"
-    thread = Thread(target =  executor_mock.runPuppetFile, args = ("fake_puppetFile",
-                            result, puppetEnv, tmpoutfile, tmperrfile, timeout))
-    thread.start()
-    time.sleep(0.1)
-    subproc_mock.should_finish_event.set()
-    subproc_mock.finished_event.wait()
-    self.assertEquals(subproc_mock.was_terminated, False, "Subprocess should not be terminated before timeout")
-    self.assertEquals(subproc_mock.returncode, 0, "Subprocess should not be terminated before timeout")
-
-
-  class  PuppetExecutor_mock(PuppetExecutor):
-
-    def __init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config, subprocess_mockup):
-      self.subprocess_mockup = subprocess_mockup
-      PuppetExecutor.__init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config)
-      pass
-
-    def lauch_puppet_subprocess(self, puppetcommand, tmpout, tmperr, puppetEnv):
-      self.subprocess_mockup.tmpout = tmpout
-      self.subprocess_mockup.tmperr = tmperr
-      return self.subprocess_mockup
-
-    def runShellKillPgrp(self, puppet):
-      puppet.terminate()  # note: In real code, subprocess.terminate() is not called
-      pass
-
-  class Subprocess_mockup():
-
-    returncode = 0
-
-    started_event = threading.Event()
-    should_finish_event = threading.Event()
-    finished_event = threading.Event()
-    was_terminated = False
-    tmpout = None
-    tmperr = None
-    pid=-1
-
-    def communicate(self):
-      self.started_event.set()
-      self.tmpout.write("Dummy output")
-      self.tmpout.flush()
-
-      self.tmperr.write("Dummy err")
-      self.tmperr.flush()
-      self.should_finish_event.wait()
-      self.finished_event.set()
-      pass
-
-    def terminate(self):
-      self.was_terminated = True
-      self.returncode = 17
-      self.should_finish_event.set()
-

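The two watchdog tests removed above pinned down the executor's timeout behaviour: a subprocess that overruns its timeout must be killed (together with its process tree), while one that finishes in time must not be touched. The general shape of such a watchdog, as a small self-contained sketch (killing the whole tree is simplified to proc.kill here; the deleted code delegated that to shell.kill_process_with_children):

import subprocess
import threading

def run_with_watchdog(cmd, timeout_seconds):
    # Start the child, then arm a timer that kills it if it overruns.
    proc = subprocess.Popen(cmd)
    timer = threading.Timer(timeout_seconds, proc.kill)
    timer.start()
    try:
        return proc.wait()  # return code; negative on POSIX if killed by signal
    finally:
        timer.cancel()      # disarm the watchdog after a normal exit
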
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/test/python/ambari_agent/TestPuppetExecutorManually.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestPuppetExecutorManually.py b/ambari-agent/src/test/python/ambari_agent/TestPuppetExecutorManually.py
deleted file mode 100644
index 5299edb..0000000
--- a/ambari-agent/src/test/python/ambari_agent/TestPuppetExecutorManually.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from ambari_agent.PuppetExecutor import PuppetExecutor
-from pprint import pformat
-import socket
-import os
-import sys
-import logging
-from AmbariConfig import AmbariConfig
-import tempfile
-
-FILEPATH="runme.pp"
-logger = logging.getLogger()
-
-class TestPuppetExecutor(TestCase):
-
-  def test_run(self):
-    """
-    Used to run an arbitrary puppet manifest. The test looks for the manifest 'runme.pp' and runs it.
-    The test does not make any assertions.
-    """
-    if not os.path.isfile(FILEPATH):
-      return
-
-    logger.info("***** RUNNING " + FILEPATH + " *****")
-    cwd = os.getcwd()
-    puppetexecutor = PuppetExecutor(cwd, "/x", "/y", "/tmp", AmbariConfig().getConfig())
-    result = {}
-    puppetEnv = os.environ
-    _, tmpoutfile = tempfile.mkstemp()
-    _, tmperrfile = tempfile.mkstemp()
-    result = puppetexecutor.runPuppetFile(FILEPATH, result, puppetEnv, tmpoutfile, tmperrfile)
-    logger.info("*** Puppet output: " + str(result['stdout']))
-    logger.info("*** Puppet errors: " + str(result['stderr']))
-    logger.info("*** Puppet retcode: " + str(result['exitcode']))
-    logger.info("****** DONE *****")
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/test/python/ambari_agent/TestRepoInstaller.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestRepoInstaller.py b/ambari-agent/src/test/python/ambari_agent/TestRepoInstaller.py
deleted file mode 100644
index 87683f9..0000000
--- a/ambari-agent/src/test/python/ambari_agent/TestRepoInstaller.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from ambari_agent.RepoInstaller import RepoInstaller
-import tempfile
-import json, os
-import shutil
-from ambari_agent.AmbariConfig import AmbariConfig
-from mock.mock import patch, MagicMock, call
-
-class TestRepoInstaller(TestCase):
-
-  def setUp(self):
-    self.dir = tempfile.mkdtemp()
-    jsonCommand = file('../../main/python/ambari_agent/test.json').read()
-    self.parsedJson= json.loads(jsonCommand)
-    self.config = AmbariConfig().getConfig()
-    self.repoInstaller = RepoInstaller(self.parsedJson, self.dir, '../../main/puppet/modules', 1, self.config)
-
-    pass
-
-  def tearDown(self):
-    shutil.rmtree(self.dir)
-    pass
-
-
-  def test_prepare_repos_info(self):
-    localParsedJson = json.loads('{"hostLevelParams" : {"repo_info" : {"test" : "test"}}}')
-    localRepoInstaller = RepoInstaller(localParsedJson, self.dir, '../../main/puppet/modules', 1, self.config)
-    localRepoInstaller.prepareReposInfo()
-    self.assertEquals(localRepoInstaller.repoInfoList['test'], "test")
-
-    localParsedJson = json.loads('{"hostLevelParams" : {"repo_info" : "1"}}')
-    localRepoInstaller = RepoInstaller(localParsedJson, self.dir, '../../main/puppet/modules', 1, self.config)
-    localRepoInstaller.prepareReposInfo()
-    self.assertEquals(localRepoInstaller.repoInfoList, 1)
-
-    localParsedJson = json.loads('{"hostLevelParams" : {"repo_info" : ""}}')
-    localRepoInstaller = RepoInstaller(localParsedJson, self.dir, '../../main/puppet/modules', 1, self.config)
-    localRepoInstaller.prepareReposInfo()
-    self.assertEquals(localRepoInstaller.repoInfoList, [])
-
-
-  def test_generate_files(self):
-    localParsedJson = json.loads('{"hostLevelParams": { "repo_info" : [{"baseUrl":"http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5"\
-           ,"osType":"centos5","repoId":"HDP-1.1.1.16_TEST","repoName":"HDP_TEST", "mirrorsList":"http://mirrors.fedoraproject.org/mirrorlist"}]}}')
-    localRepoInstaller = RepoInstaller(localParsedJson, self.dir, '../../main/puppet/modules', 1, self.config)
-    localRepoInstaller.prepareReposInfo()
-    result = localRepoInstaller.generateFiles()
-    self.assertTrue(result[0].endswith("HDP-1.1.1.16_TEST-1.pp"))
-
-  @patch.object(RepoInstaller, 'prepareReposInfo')
-  @patch.object(RepoInstaller, 'generateFiles')
-  def testInstallRepos(self, generateFilesMock, prepareReposInfoMock):
-    result = self.repoInstaller.generate_repo_manifests()
-    self.assertTrue(prepareReposInfoMock.called)
-    self.assertTrue(generateFilesMock.called)
-    print('generate_repo_manifests result: ' + result.__str__())
-    pass

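The prepareReposInfo tests above show that hostLevelParams/repo_info could arrive either as an already-parsed JSON value or as a JSON-encoded string (possibly empty). A sketch of that tolerant parse, inferred from the assertions alone (Python 3 spelling; the deleted agent code was Python 2):

import json

def prepare_repos_info(command):
    # repo_info may be a JSON value, a JSON-encoded string, or ""
    raw = command['hostLevelParams']['repo_info']
    if isinstance(raw, str):
        return json.loads(raw) if raw else []
    return raw
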
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/test/python/ambari_agent/examples/ControllerTester.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/examples/ControllerTester.py b/ambari-agent/src/test/python/ambari_agent/examples/ControllerTester.py
index b5aa404..a3163f4 100644
--- a/ambari-agent/src/test/python/ambari_agent/examples/ControllerTester.py
+++ b/ambari-agent/src/test/python/ambari_agent/examples/ControllerTester.py
@@ -26,14 +26,14 @@ from mock.mock import patch, MagicMock, call
 from ambari_agent.AmbariConfig  import AmbariConfig
 import Queue
 import logging
-from ambari_agent import PuppetExecutor, PythonExecutor
+from ambari_agent import PythonExecutor
 
 logger=logging.getLogger()
 
 queue = Queue.Queue()
 
-# Set to True to replace python and puppet calls with mockups
-disable_python_and_puppet = True
+# Set to True to replace python calls with mockups
+disable_python = True
 
 agent_version = "1.3.0"
 
@@ -113,24 +113,16 @@ responseId = Int(0)
 
 def main():
 
-  if disable_python_and_puppet:
-    with patch.object(PuppetExecutor.PuppetExecutor, 'run_manifest') \
-                                          as run_manifest_method:
-      run_manifest_method.side_effect = \
-              lambda command, file, tmpout, tmperr: {
-          'exitcode' : 0,
-          'stdout'   : "Simulated run of pp %s" % file,
-          'stderr'   : 'None'
-        }
-      with patch.object(PythonExecutor.PythonExecutor, 'run_file') \
+  if disable_python:
+    with patch.object(PythonExecutor.PythonExecutor, 'run_file') \
                                           as run_file_py_method:
-        run_file_py_method.side_effect = \
-              lambda command, file, tmpoutfile, tmperrfile: {
-          'exitcode' : 0,
-          'stdout'   : "Simulated run of py %s" % file,
-          'stderr'   : 'None'
-        }
-        run_simulation()
+      run_file_py_method.side_effect = \
+            lambda command, file, tmpoutfile, tmperrfile: {
+        'exitcode' : 0,
+        'stdout'   : "Simulated run of py %s" % file,
+        'stderr'   : 'None'
+      }
+      run_simulation()
   else:
     run_simulation()
 

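For readers unfamiliar with the mock idiom kept in ControllerTester above: patch.object swaps out a method for the duration of the with-block, and side_effect supplies the replacement behaviour, so the simulation never actually shells out. A generic, self-contained illustration (the class and argument names here are invented; unittest.mock is the Python 3 home of the mock package imported above):

from unittest.mock import patch

class Executor(object):
    def run_file(self, command, file, tmpout, tmperr):
        raise RuntimeError('would really execute %s here' % file)

with patch.object(Executor, 'run_file') as run_file_mock:
    run_file_mock.side_effect = lambda command, file, tmpout, tmperr: {
        'exitcode': 0,
        'stdout': 'Simulated run of py %s' % file,
        'stderr': 'None'
    }
    print(Executor().run_file('cmd', 'site.py', '/tmp/out', '/tmp/err'))
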
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-client/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-client/pom.xml b/ambari-client/pom.xml
index 86a0837..038422f 100755
--- a/ambari-client/pom.xml
+++ b/ambari-client/pom.xml
@@ -153,7 +153,6 @@
             <exclude>src/examples/*</exclude>
             <exclude>src/test/python/dummy*.txt</exclude>
             <exclude>src/main/python/ambari_client/imports.txt</exclude>
-            <exclude>src/main/puppet/modules/stdlib/**</exclude>
             <exclude>**/*.erb</exclude>
             <exclude>**/*.json</exclude>
           </excludes>


[12/15] AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp
deleted file mode 100644
index 4beaafd..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#singleton, but implemented as a define so collections can be used to override params
-define hdp-hadoop::package(
-  $ensure = 'present',
-  $include_32_bit = false,
-  $include_64_bit = false
-)
-{
-  #just use 32 if it's specifically requested and there are no 64-bit requests
-  if ($include_32_bit == true) and ($include_64_bit != true) {
-    $size = 32
-  } else  {
-    $size = 64
-  }
-  $package = "hadoop ${size}"
-  $lzo_enabled = $hdp::params::lzo_enabled
-
-  hdp::package{ $package:
-    ensure       => $ensure,
-    package_type => 'hadoop',
-    size         => $size,
-    lzo_needed   => $lzo_enabled
-  }
-  anchor{ 'hdp-hadoop::package::helper::begin': } -> Hdp::Package[$package] -> anchor{ 'hdp-hadoop::package::helper::end': }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
deleted file mode 100644
index 5b9ebaa..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
+++ /dev/null
@@ -1,222 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::params(
-) inherits hdp::params 
-{
-
-  ##TODO: for testing in masterless mode
-  $use_preconditions = false
-  ####  
-  $conf_dir = $hdp::params::hadoop_conf_dir 
-
-  ####hbase
-  $hdfs_root_dir = $hdp::params::hbase_hdfs_root_dir
-
-  ####### users
-
-  $mapred_user = $hdp::params::mapred_user
-  $hdfs_user = $hdp::params::hdfs_user
-  
-  ##### security related
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
- 
-  if ($hdp::params::security_enabled == true) {
-    $enable_security_authorization = true
-    $security_type = "kerberos"
-    $task_controller = "org.apache.hadoop.mapred.LinuxTaskController"
-    $dfs_datanode_address = 1019
-    $dfs_datanode_http_address = 1022
-  } else {
-    $enable_security_authorization = false
-    $security_type = "simple"
-    $task_controller = "org.apache.hadoop.mapred.DefaultTaskController"
-    $dfs_datanode_address = hdp_default("dfs_datanode_address","50010")
-    $dfs_datanode_http_address = hdp_default("dfs_datanode_http_address","50075")
-  }
-
-  ### hadoop-env
-  
-  $dtnode_heapsize = hdp_default("dtnode_heapsize","1024m")
-  $ttnode_heapsize = hdp_default("ttnode_heapsize","1024m")
-
-  $hadoop_heapsize = hdp_default("hadoop_heapsize","1024")
-
-  $hdfs_log_dir_prefix = hdp_default("hdfs_log_dir_prefix","/var/log/hadoop")
-
-  $hadoop_pid_dir_prefix = hdp_default("hadoop_pid_dir_prefix","/var/run/hadoop")
-  $run_dir = $hadoop_pid_dir_prefix
-
-  $namenode_formatted_mark_dir = "${run_dir}/hdfs/namenode/formatted/"
-
-  $jtnode_heapsize = hdp_default("jtnode_heapsize","1024m")
-
-  $jtnode_opt_maxnewsize = hdp_default("jtnode_opt_maxnewsize","200m")
-
-  $jtnode_opt_newsize = hdp_default("jtnode_opt_newsize","200m")
-
-  $namenode_heapsize = hdp_default("namenode_heapsize","1024m")
-
-  $namenode_opt_maxnewsize = hdp_default("namenode_opt_maxnewsize","640m")
-
-  $namenode_opt_newsize = hdp_default("namenode_opt_newsize","640m")
-  
-  $hadoop_libexec_dir = hdp_default("hadoop_libexec_dir","/usr/lib/hadoop/libexec")
-  
-  $mapreduce_libs_path = hdp_default("mapreduce_libs_path","/usr/lib/hadoop-mapreduce/*")
-  
-  $mapred_log_dir_prefix = hdp_default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-  $mapred_pid_dir_prefix = hdp_default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-
-  # Cannot create new dir in directory.pp, reusing existing path
-  $namenode_dirs_created_stub_dir = "${hdfs_log_dir_prefix}/${hdp::params::hdfs_user}"
-  $namenode_dirs_stub_filename = "namenode_dirs_created"
-
-  ### JSVC_HOME path is correct for AMD64 only, but can be changed through API
-  if ($hdp::params::hdp_os_type == "suse") {
-    $jsvc_path = hdp_default("jsvc_path","/usr/lib/bigtop-utils")
-  } else {
-    $jsvc_path = hdp_default("jsvc_path","/usr/libexec/bigtop-utils")
-  }
-
-  ### compression related
-  if (($hdp::params::lzo_enabled == true) and ($hdp::params::snappy_enabled == true)) {
-    $mapred_compress_map_output = true
-    $compression_codecs =  "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec"
-    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
-  } elsif ($hdp::params::snappy_enabled == true) {
-    $mapred_compress_map_output = true
-    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec" 
-    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
-  } elsif ($hdp::params::lzo_enabled == true) {
-    $mapred_compress_map_output = true
-    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec"
-    $mapred_map_output_compression_codec = "com.hadoop.compression.lzo.LzoCodec"
-  } else { 
-    $mapred_compress_map_output = false
-    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec"
-    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.DefaultCodec"
-  }
-
-  ### core-site
-  if (hdp_get_major_stack_version($stack_version) >= 2) {
-    $fs_checkpoint_dir = hdp_default("hdfs-site/dfs.namenode.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
-  } else {
-    $fs_checkpoint_dir = hdp_default("core-site/fs.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
-  }
-
-  $proxyuser_group = hdp_default("core-site/proxyuser.group","users")
-  
-  $hadoop_tmp_dir = hdp_default("core-site/hadoop.tmp.dir","/tmp/hadoop-$hdfs_user")
-  
-  $hadoop_ssl_enabled = hdp_default("core-site/hadoop.ssl.enabled","false")
-
-  ### hdfs-site
-  $datanode_du_reserved = hdp_default("hdfs-site/datanode.du.reserved",1073741824)
-
-  $dfs_block_local_path_access_user = hdp_default("hdfs-site/dfs.block.local.path.access.user","hbase")
-
-  $dfs_data_dir = $hdp::params::dfs_data_dir
-
-  $dfs_datanode_data_dir_perm = hdp_default("hdfs-site/dfs.datanode.data.dir.perm",750)
-
-  $dfs_datanode_failed_volume_tolerated = hdp_default("hdfs-site/dfs.datanode.failed.volume.tolerated",0)
-
-  $dfs_exclude = hdp_default("hdfs-site/dfs.exclude","dfs.exclude")
-
-  $dfs_include = hdp_default("hdfs-site/dfs.include","dfs.include")
-  
-  if (hdp_get_major_stack_version($stack_version) >= 2) {
-    $dfs_name_dir = hdp_default("hdfs-site/dfs.namenode.name.dir","/tmp/hadoop-hdfs/dfs/name")
-  } else {
-    $dfs_name_dir = hdp_default("hdfs-site/dfs.name.dir","/tmp/hadoop-hdfs/dfs/name")
-  }
-  
-  $dfs_replication = hdp_default("hdfs-site/dfs.replication",3)
-
-  $dfs_support_append = hdp_default("hdfs-site/dfs.support.append",true)
-
-  $dfs_webhdfs_enabled = hdp_default("hdfs-site/dfs.webhdfs.enabled",false)
-  
-  $jn_edits_dir = hdp_default("hdfs-site/dfs.journalnode.edits.dir", "/grid/0/hdfs/journal")
-  
-  $dfs_domain_socket_path = hdp_default("hdfs-site/dfs.domain.socket.path","/var/lib/hadoop-hdfs/dn_socket")
-
- ######### mapred #######
-   ### mapred-site
-
-  $mapred_system_dir = '/mapred/system'
-
-  $mapred_child_java_opts_sz = hdp_default("mapred-site/mapred.child.java.opts.sz","-Xmx768m")
-
-  $mapred_cluster_map_mem_mb = hdp_default("mapred-site/mapred.cluster.map.mem.mb","-1")
-
-  $mapred_cluster_max_map_mem_mb = hdp_default("mapred-site/mapred.cluster.max.map.mem.mb","-1")
-
-  $mapred_cluster_max_red_mem_mb = hdp_default("mapred-site/mapred.cluster.max.red.mem.mb","-1")
-
-  $mapred_cluster_red_mem_mb = hdp_default("mapred-site/mapred.cluster.red.mem.mb","-1")
-
-  $mapred_job_map_mem_mb = hdp_default("mapred-site/mapred.job.map.mem.mb","-1")
-
-  $mapred_job_red_mem_mb = hdp_default("mapred-site/mapred.job.red.mem.mb","-1")
-
-  $mapred_jobstatus_dir = hdp_default("mapred-site/mapred.jobstatus.dir","file:////mapred/jobstatus")
-
-  if (hdp_get_major_stack_version($stack_version) >= 2) {
-    $mapred_local_dir = hdp_default("mapred-site/mapreduce.cluster.local.dir","/tmp/hadoop-mapred/mapred/local")
-  } else {
-    $mapred_local_dir = hdp_default("mapred-site/mapred.local.dir","/tmp/hadoop-mapred/mapred/local")
-  }
-
-  $mapred_tt_group = hdp_default("mapred-site/mapreduce.tasktracker.group", "hadoop")
-   
-  $mapreduce_userlog_retainhours = hdp_default("mapred-site/mapreduce.userlog.retainhours",24)
-
-  $maxtasks_per_job = hdp_default("mapred-site/maxtasks.per.job","-1")
-
-  $scheduler_name = hdp_default("mapred-site/scheduler.name","org.apache.hadoop.mapred.CapacityTaskScheduler")
-
-  #### health_check
-
-  $security_enabled = $hdp::params::security_enabled
-
-  $task_bin_exe = hdp_default("task_bin_exe")
-
-  $rca_enabled = hdp_default("rca_enabled", false)
-  $rca_disabled_prefix = "###"
-  if ($rca_enabled == true) {
-    $rca_prefix = ""
-  } else {
-    $rca_prefix = $rca_disabled_prefix
-  }
-  # $ambari_db_server_host = hdp_default("ambari_db_server_host", "localhost")
-  $ambari_db_rca_url = hdp_default("ambari_db_rca_url", "jdbc:postgresql://localhost/ambarirca")
-  $ambari_db_rca_driver = hdp_default("ambari_db_rca_driver", "org.postgresql.Driver")
-  $ambari_db_rca_username = hdp_default("ambari_db_rca_username", "mapred")
-  $ambari_db_rca_password = hdp_default("ambari_db_rca_password", "mapred")
-
-  if ($hdp::params::dfs_ha_enabled == true) {
-    $nameservice = $hdp::params::dfs_ha_nameservices
-    $namenode_id = hdp_hadoop_get_namenode_id("dfs.namenode.rpc-address.${nameservice}", "hdfs-site")
-  }
-
-}

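Most of the defaults in params.pp above go through hdp_default("site-file/key", fallback): a slash-qualified name is looked up in the corresponding pushed-down configuration file, and the fallback is used when the key is absent. The equivalent lookup, sketched in Python rather than as the Puppet parser function the deleted code actually used:

def hdp_default(configurations, path, default=None):
    # "hdfs-site/dfs.replication" -> configurations["hdfs-site"]["dfs.replication"], else default
    site, sep, key = path.partition('/')
    if sep:
        return configurations.get(site, {}).get(key, default)
    return configurations.get(path, default)
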
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
deleted file mode 100644
index 98def76..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
+++ /dev/null
@@ -1,132 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp-hadoop::service(
-  $ensure = 'running',
-  $user,
-  $initial_wait = undef,
-  $create_pid_dir = true,
-  $create_log_dir = true
-)
-{
-
-  $security_enabled = $hdp::params::security_enabled
-
-  #NOTE: does not work if namenode and datanode are on the same host
-  $pid_dir = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${user}"
-  
-  $hadoop_libexec_dir = $hdp-hadoop::params::hadoop_libexec_dir
-  
-  if (($security_enabled == true) and ($name == 'datanode')) {
-    $run_as_root = true
-  } else {       
-    $run_as_root = false
-  }
-
-  if (($security_enabled == true) and ($name == 'datanode')) {
-    $hdfs_user = $hdp::params::hdfs_user
-    $pid_file = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${hdfs_user}/hadoop-${hdfs_user}-${name}.pid"
-  } else {
-    $pid_file = "${pid_dir}/hadoop-${user}-${name}.pid"
-  } 
-
-  $log_dir = "${hdp-hadoop::params::hdfs_log_dir_prefix}/${user}"
-  $hadoop_daemon = "export HADOOP_LIBEXEC_DIR=${hadoop_libexec_dir} && ${hdp::params::hadoop_bin}/hadoop-daemon.sh"
-   
-  $cmd = "${hadoop_daemon} --config ${hdp-hadoop::params::conf_dir}"
-  if ($ensure == 'running') {
-    if ($run_as_root == true) {
-      $daemon_cmd = "su - root -c  '${cmd} start ${name}'"
-    } else {
-      $daemon_cmd = "su - ${user} -c  '${cmd} start ${name}'"
-    }
-    # Here we check whether the pid file exists and, if it does, run 'ps <pid>',
-    # which returns a non-zero exit code if the process is not running
-    $service_is_up = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  } elsif ($ensure == 'stopped') {
-    if ($run_as_root == true) {
-      $daemon_cmd = "su - root -c  '${cmd} stop ${name}' && rm -f ${pid_file}"
-    } else {
-      $daemon_cmd = "su - ${user} -c  '${cmd} stop ${name}' && rm -f ${pid_file}"
-    }
-    $service_is_up = undef
-  } else {
-    $daemon_cmd = undef
-  }
- 
-  if ($create_pid_dir == true) {
-    hdp::directory_recursive_create { $pid_dir: 
-      owner       => $user,
-      context_tag => 'hadoop_service',
-      service_state => $::service_state,
-      force => true
-    }
-  }
-  
-  if ($create_log_dir == true) {
-    hdp::directory_recursive_create { $log_dir: 
-      owner       => $user,
-      context_tag => 'hadoop_service',
-      service_state => $::service_state,
-      force => true
-    }
-  }
-  if ($daemon_cmd != undef) {
-    if ($name == 'datanode' and $ensure == 'running') {
-      exec { 'delete_pid_before_datanode_start':
-        command  => "rm -f ${pid_file}",
-        unless       => $service_is_up,
-        path => $hdp::params::exec_path
-      }
-    }
-    hdp::exec { $daemon_cmd:
-      command      => $daemon_cmd,
-      unless       => $service_is_up,
-      initial_wait => $initial_wait
-    }
-  }
-
-  anchor{"hdp-hadoop::service::${name}::begin":}
-  anchor{"hdp-hadoop::service::${name}::end":}
-  if ($daemon_cmd != undef) {
-    Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Exec[$daemon_cmd] -> Anchor["hdp-hadoop::service::${name}::end"]
-
-    if ($create_pid_dir == true) {
-      Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Directory_recursive_create[$pid_dir] -> Hdp::Exec[$daemon_cmd] 
-    }
-     if ($create_log_dir == true) {
-      Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Directory_recursive_create[$log_dir] -> Hdp::Exec[$daemon_cmd] 
-    }
-    if ($name == 'datanode' and $ensure == 'running') {
-      Anchor["hdp-hadoop::service::${name}::begin"] -> Exec['delete_pid_before_datanode_start'] -> Hdp::Exec[$daemon_cmd]
-    }
-  }
-  if ($ensure == 'running') {
-    #TODO: look at Puppet resource retry and retry_sleep
-    #TODO: can make sleep contingent on $name
-    $sleep = 5
-    $post_check = "sleep ${sleep}; ${service_is_up}"
-    hdp::exec { $post_check:
-      command => $post_check,
-      unless  => $service_is_up
-    }
-    Hdp::Exec[$daemon_cmd] -> Hdp::Exec[$post_check] -> Anchor["hdp-hadoop::service::${name}::end"]
-  }  
-}

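The liveness probe in service.pp above ("ls pid_file && ps `cat pid_file`") is the classic pid-file check: the service counts as up only if the pid file exists and a process with that pid is still alive. A Python rendering of the same check (a sketch; note that os.kill(pid, 0) raises PermissionError for a live process owned by another user, which this version would misreport as down):

import os

def service_is_up(pid_file):
    # Equivalent of: ls pid_file >/dev/null 2>&1 && ps `cat pid_file` >/dev/null 2>&1
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)  # signal 0 probes for existence; nothing is delivered
        return True
    except (OSError, ValueError):
        return False
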
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp
deleted file mode 100644
index f0338f9..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::slave::jobtracker-conn($jobtracker_host)
-{
-  Hdp-Hadoop::Configfile<||>{jtnode_host => $jobtracker_host}
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp
deleted file mode 100644
index 326f31d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::slave::master-conn($master_host)
-{
-  Hdp-Hadoop::Configfile<||>{
-    namenode_host => $master_host,
-    jtnode_host   => $master_host
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp
deleted file mode 100644
index 8047c05..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#TODO: this might be replaced by just using hdp::namenode-conn
-class hdp-hadoop::slave::namenode-conn($namenode_host)
-{
-  #TODO: check if we can get rid of both
-  Hdp-Hadoop::Configfile<||>{namenode_host => $namenode_host}
-  Hdp::Configfile<||>{namenode_host => $namenode_host} #for components other than hadoop (e.g., hbase) 
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp
deleted file mode 100644
index 296a0d4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::smoketest(
-  $opts={}
-)
-{
-  #TODO: put in wait
-  #TODO: look for better way to compute outname
-  $date_format = '"%M%d%y"'
-  $outname = inline_template("<%=  `date +${date_format}`.chomp %>")
-
-  #TODO: hardwired to run on namenode and to use user hdfs
-
-  $put = "dfs -put /etc/passwd passwd-${outname}"
-  $exec = "jar /usr/share/hadoop/hadoop-examples-*.jar wordcount passwd-${outname} ${outname}.out"
-  $result = "fs -test -e ${outname}.out /dev/null 2>&1"
-  anchor{ "hdp-hadoop::smoketest::begin" :} ->
-  hdp-hadoop::exec-hadoop{ $put:
-    command => $put
-  } ->
-  hdp-hadoop::exec-hadoop{ $exec:
-    command =>  $exec
-  } ->
-  hdp-hadoop::exec-hadoop{ $result:
-    command =>  $result
-  } ->
-  anchor{ "hdp-hadoop::smoketest::end" :}
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp
deleted file mode 100644
index f2c5beb..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::snamenode(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params  
-{
-  $hdp::params::service_exists['hdp-hadoop::snamenode'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-    $fs_checkpoint_dir = $hdp-hadoop::params::fs_checkpoint_dir
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      if ($hdp::params::service_exists['hdp-hadoop::namenode'] != true) {
-        $masterHost = $kerberos_adminclient_host[0]
-        hdp::download_keytab { 'snamenode_service_keytab' :
-          masterhost => $masterHost,
-          keytabdst => "${$keytab_path}/nn.service.keytab",
-          keytabfile => 'nn.service.keytab',
-          owner => $hdp-hadoop::params::hdfs_user
-        }
-        hdp::download_keytab { 'snamenode_spnego_keytab' :   
-          masterhost => $masterHost,
-          keytabdst => "${$keytab_path}/spnego.service.keytab",
-          keytabfile => 'spnego.service.keytab', 
-          owner => $hdp-hadoop::params::hdfs_user,
-          mode => '0440',
-          group => $hdp::params::user_group
-        }
-      }
-    }
- 
-    Hdp-Hadoop::Configfile<||>{snamenode_host => $hdp::params::host_address}
-  
-    hdp-hadoop::snamenode::create_name_dirs { $fs_checkpoint_dir: 
-      service_state => $service_state
-    }
-    
-    if ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) {
-      $create_pid_dir = false
-      $create_log_dir = false
-    } else {
-      $create_pid_dir = true
-      $create_log_dir = true
-    }
-    
-    hdp-hadoop::service{ 'secondarynamenode':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => $create_pid_dir,
-      create_log_dir => $create_log_dir
-    }
-  
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Namenode::Create_name_dirs<||> ->
-      Hdp-hadoop::Service['secondarynamenode'] -> Anchor['hdp-hadoop::end']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::snamenode::create_name_dirs($service_state)
-{
-   $dirs = hdp_array_from_comma_list($name)
-   hdp::directory_recursive_create { $dirs :
-     owner => $hdp-hadoop::params::hdfs_user,
-     mode => '0755',
-     service_state => $service_state,
-     force => true
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp
deleted file mode 100644
index e6869d6..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::tasktracker(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::tasktracker'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-
-  if ($hdp::params::use_32_bits_on_slaves == true) {
-    Hdp-hadoop::Package<||>{include_32_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 32}
-  } else {
-    Hdp-hadoop::Package<||>{include_64_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 64}
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-    $mapred_local_dir = $hdp-hadoop::params::mapred_local_dir
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'tasktracker_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/tt.service.keytab",
-        keytabfile => 'tt.service.keytab',
-        owner => $hdp-hadoop::params::mapred_user
-      }
-    }
-
-    hdp-hadoop::tasktracker::create_local_dirs { $mapred_local_dir:
-      service_state => $service_state
-    }
-    
-    if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) {
-      $create_pid_dir = false
-      $create_log_dir = false
-    } else {
-      $create_pid_dir = true
-      $create_log_dir = true
-    }
-
-    hdp-hadoop::service{ 'tasktracker':
-      ensure => $service_state,
-      user   => $hdp-hadoop::params::mapred_user,
-      create_pid_dir => $create_pid_dir,
-      create_log_dir => $create_log_dir
-    }
-  
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Tasktracker::Create_local_dirs<||> -> Hdp-hadoop::Service['tasktracker'] ->
-    Anchor['hdp-hadoop::end']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::tasktracker::create_local_dirs($service_state)
-{
-  if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] != true) {
-    $dirs = hdp_array_from_comma_list($name)
-    hdp::directory_recursive_create_ignore_failure { $dirs :
-      owner => $hdp-hadoop::params::mapred_user,
-      mode => '0755',
-      service_state => $service_state,
-      force => true
-    }
-  }
-}

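As in the snamenode manifest, the keytab handling above copies
tt.service.keytab from the first KDC admin-client host and installs it
with restrictive ownership. A hedged Python sketch of that flow; the scp
transport is an assumption (the hdp::download_keytab define's actual
mechanism lives elsewhere in the module), and the default path is
illustrative:

    import os
    import pwd
    import subprocess

    def download_keytab(master_host, keytab_file, owner,
                        dest_dir="/etc/security/keytabs", mode=0o400):
        # Fetch the keytab from the master, then lock down its permissions.
        dest = os.path.join(dest_dir, keytab_file)
        subprocess.check_call(["scp", "%s:%s" % (master_host, dest), dest])
        pw = pwd.getpwnam(owner)
        os.chown(dest, pw.pw_uid, pw.pw_gid)
        os.chmod(dest, mode)
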
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/zkfc.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/zkfc.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/zkfc.pp
deleted file mode 100644
index 5f74012..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/zkfc.pp
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::zkfc(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params 
-{
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-  
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-    
-    hdp-hadoop::service{ 'zkfc':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => true,
-      create_log_dir => true
-    }
-    
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Service['zkfc'] -> Anchor['hdp-hadoop::end']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb
deleted file mode 100644
index 77e458f..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb
deleted file mode 100644
index 750549c..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb
+++ /dev/null
@@ -1,3 +0,0 @@
-<% exlude_hosts_list.each do |val| -%>
-<%= val%>
-<% end -%>

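The template above simply emits one decommissioned host per line; note the
binding really is spelled "exlude_hosts_list" in the source, so it is left
as-is here. The equivalent rendering in Python is a one-liner (helper name
is illustrative):

    def write_hosts_file(path, hosts):
        # One host per line, matching the ERB template's output.
        with open(path, "w") as f:
            f.write("\n".join(hosts) + "\n")
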
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
deleted file mode 100644
index ef0d3d4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
+++ /dev/null
@@ -1,122 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME=<%=scope.function_hdp_template_var("::hdp::params::java64_home")%>
-export HADOOP_HOME_WARN_SUPPRESS=1
-
-# Hadoop Configuration Directory
-#TODO: if env var set that can cause problems
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-<%=scope.function_hdp_template_var("conf_dir")%>}
-
-<% if scope.function_hdp_template_var("::hdp::params::isHadoop2Stack") == true %>
-# Path to jsvc required by secure HDP 2.0 datanode
-export JSVC_HOME=<%=scope.function_hdp_template_var("jsvc_path")%>
-<% end %>
-
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE="<%=scope.function_hdp_template_var("hadoop_heapsize")%>"
-
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms<%=scope.function_hdp_template_var("namenode_heapsize")%>"
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms<%=scope.function_hdp_template_var("namenode_heapsize")%> -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("jtnode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("jtnode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx<%=scope.function_hdp_template_var("jtnode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
-HADOOP_TASKTRACKER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("ttnode_heapsize")%> -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-HADOOP_DATANODE_OPTS="-Xmx<%=scope.function_hdp_template_var("dtnode_heapsize")%> -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("hadoop_heapsize")%>m ${HADOOP_BALANCER_OPTS}"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER=<%=scope.function_hdp_template_var("hdfs_user")%>
-
-# Extra ssh options.  Empty by default.
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER
-
-# History server logs
-export HADOOP_MAPRED_LOG_DIR=<%=scope.function_hdp_template_var("mapred_log_dir_prefix")%>/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$HADOOP_SECURE_DN_USER
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$USER
-export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$HADOOP_SECURE_DN_USER
-
-# History server pid
-export HADOOP_MAPRED_PID_DIR=<%=scope.function_hdp_template_var("mapred_pid_dir_prefix")%>/$USER
-
-YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-
-# export HADOOP_NICENESS=10
-
-# Use libraries from standard classpath
-JAVA_JDBC_LIBS=""
-#Add libraries required by mysql connector
-for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by oracle connector
-for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by nodemanager
-MAPREDUCE_LIBS=<%=scope.function_hdp_template_var("mapreduce_libs_path")%>
-export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
-
-# Setting path to hdfs command line
-export HADOOP_LIBEXEC_DIR=<%=scope.function_hdp_template_var("hadoop_libexec_dir")%>
-
-#Mostly required for hadoop 2.0
-export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64

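The two shell loops near the end of hadoop-env.sh scan /usr/share/java for
MySQL and Oracle JDBC jars and append them to HADOOP_CLASSPATH. The same
glob-and-join logic in Python (sketch only; the directory default and
helper name are assumptions):

    import glob

    def jdbc_classpath(java_dir="/usr/share/java"):
        # Collects *mysql* and *ojdbc* jars, like the two for-loops above,
        # producing a ":"-prefixed fragment to append to the classpath.
        jars = glob.glob(java_dir + "/*mysql*") + glob.glob(java_dir + "/*ojdbc*")
        return "".join(":" + j for j in jars)
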
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
deleted file mode 100644
index 65d9767..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-<%if not scope.function_hdp_is_empty(ganglia_server_host)%>
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
-datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
-tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-resourcemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8664
-nodemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-historyserver.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8666
-journalnode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-<% end %>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb
deleted file mode 100644
index 65d9767..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-<%if not scope.function_hdp_is_empty(ganglia_server_host)%>
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
-datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
-tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-resourcemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8664
-nodemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-historyserver.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8666
-journalnode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-<% end %>

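This file is a verbatim copy of the -GANGLIA variant above. Both map each
daemon to a Ganglia collector port: 8661 for the namenode, 8662 for the
jobtracker, 8664 for the resourcemanager, 8666 for the historyserver, and
8660 for the slave daemons. A sketch of generating that block in Python
(hypothetical helper, not Ambari code):

    GANGLIA_PORTS = {
        "namenode": 8661, "datanode": 8660, "jobtracker": 8662,
        "tasktracker": 8660, "maptask": 8660, "reducetask": 8660,
        "resourcemanager": 8664, "nodemanager": 8660,
        "historyserver": 8666, "journalnode": 8660,
    }

    def ganglia_sink_properties(server_host):
        # Emits the per-daemon sink lines from the template above.
        return "\n".join(
            "%s.sink.ganglia.servers=%s:%d" % (name, server_host, port)
            for name, port in sorted(GANGLIA_PORTS.items()))
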
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hdfs.conf.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hdfs.conf.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hdfs.conf.erb
deleted file mode 100644
index b2c3179..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hdfs.conf.erb
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-<%=scope.function_hdp_template_var("hdfs_user")%>   - nofile 32768
-<%=scope.function_hdp_template_var("hdfs_user")%>   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check-v2.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check-v2.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check-v2.erb
deleted file mode 100644
index cb7b12b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check-v2.erb
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0

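check_disks above cross-references the ext3 entries in /etc/fstab against
/proc/mounts, flagging each mount point that is missing ("(u)") or mounted
read-only ("(ro)"); this v2 variant exempts /mnt from the unmounted check.
The same logic in Python, assuming whitespace-separated fields suffice:

    def check_disks():
        # Returns (ok, message); ok is False when any ext3 mount from
        # /etc/fstab is absent from /proc/mounts or is mounted read-only.
        mounts = {}
        with open("/proc/mounts") as f:
            for line in f:
                parts = line.split()
                if len(parts) >= 4:
                    mounts[parts[1]] = parts[3]
        problems = []
        with open("/etc/fstab") as f:
            for line in f:
                if line.startswith("#"):
                    continue
                parts = line.split()
                if len(parts) >= 3 and parts[2] == "ext3":
                    mp = parts[1]
                    if mp not in mounts and mp != "/mnt":
                        problems.append(mp + "(u)")    # not mounted
                    elif mp in mounts and mounts[mp].startswith("ro,"):
                        problems.append(mp + "(ro)")   # mounted read-only
        return (not problems,
                "disks ok" if not problems else " ".join(problems))
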
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb
deleted file mode 100644
index b84b336..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_taskcontroller {
-  if [ "<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>" == "true" ]; then
-    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
-    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
-      echo "taskcontroller ok"
-    else
-      echo 'check taskcontroller' ; exit 1
-    fi
-  fi
-}
-
-function check_jetty {
-  hname=`hostname`
-  jmx=`curl -s -S -m 5 "http://$hname:<%=scope.function_hdp_template_var("::hdp::tasktracker_port")%>/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
-  if [ $? -eq 0 ] ; then
-    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
-    e=${e:-0} # no jmx servlet ?
-    if [ $e -gt 10 ] ; then
-      echo "check jetty: shuffle_exceptions=$e" ; exit 1
-    else
-      echo "jetty ok"
-    fi
-  else
-    echo "check jetty: ping failed" ; exit 1
-  fi
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks taskcontroller jetty; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0

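This older variant adds two checks on TaskTracker hosts: check_taskcontroller,
which only runs on secure clusters and requires the task-controller binary to
be setuid/setgid 6050 and owned root:hadoop, and check_jetty, which polls the
shuffle JMX servlet for exception counts. The permission check translates to
Python as below (the path argument is illustrative):

    import grp
    import os
    import pwd
    import stat

    def check_taskcontroller(path, security_enabled=True):
        # Passes only when the binary is mode 6050 and owned root:hadoop,
        # matching the stat -c %a:%U:%G comparison in the script above.
        if not security_enabled:
            return True
        try:
            st = os.stat(path)
        except OSError:
            return False
        return (stat.S_IMODE(st.st_mode) == 0o6050
                and pwd.getpwuid(st.st_uid).pw_name == "root"
                and grp.getgrgid(st.st_gid).gr_name == "hadoop")
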
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/include_hosts_list.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/include_hosts_list.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/include_hosts_list.erb
deleted file mode 100644
index 5b519c6..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/include_hosts_list.erb
+++ /dev/null
@@ -1,3 +0,0 @@
-<% include_hosts_list.each do |val| -%>
-<%= val %>
-<% end -%>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb
deleted file mode 100644
index 1458f1e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb
+++ /dev/null
@@ -1,227 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-#Security audit appender
-#
-hadoop.security.logger=INFO,console
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth.audit
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-log4j.appender.RFA.MaxFileSize=256MB
-log4j.appender.RFA.MaxBackupIndex=10
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-<% if (scope.function_hdp_template_var("::hdp::params::is_jtnode_master") || scope.function_hdp_template_var("::hdp::params::is_rmnode_master"))%>
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-<% if (scope.function_hdp_get_major_stack_version([scope.function_hdp_template_var("stack_version")]) >= 2)%>
-# Set the ResourceManager summary log filename
-yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-# Set the ResourceManager summary log level and appender
-yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-
-# To enable AppSummaryLogging for the RM,
-# set yarn.server.resourcemanager.appsummary.logger to
-# <LEVEL>,RMSUMMARY in hadoop-env.sh
-
-# Appender for ResourceManager Application Summary Log
-# Requires the following properties to be set
-#    - hadoop.log.dir (Hadoop Log directory)
-#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-log4j.appender.RMSUMMARY.File=<%=scope.function_hdp_template_var("yarn_log_dir_prefix")%>/<%=scope.function_hdp_template_var("yarn_user")%>/${yarn.server.resourcemanager.appsummary.log.file}
-log4j.appender.RMSUMMARY.MaxFileSize=256MB
-log4j.appender.RMSUMMARY.MaxBackupIndex=20
-log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-<% else %>
-log4j.appender.JSA.File=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/<%=scope.function_hdp_template_var("mapred_user")%>/${hadoop.mapreduce.jobsummary.log.file}
-<%end-%>
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-<% if (scope.function_hdp_get_major_stack_version([scope.function_hdp_template_var("stack_version")]) >= 2)%>
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-<% else %>
-log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
-log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
-<%end-%>
-<%end-%>
-
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.database=<%=scope.function_hdp_host("ambari_db_rca_url")%>
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.driver=<%=scope.function_hdp_host("ambari_db_rca_driver")%>
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.user=<%=scope.function_hdp_host("ambari_db_rca_username")%>
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.password=<%=scope.function_hdp_host("ambari_db_rca_password")%>
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.logger=DEBUG,JHA
-
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.database=${ambari.jobhistory.database}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.driver=${ambari.jobhistory.driver}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.user=${ambari.jobhistory.user}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.password=${ambari.jobhistory.password}
-
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true

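The rca_prefix trick at the end of this template is worth noting: when RCA
job-history logging is disabled, the prefix expands to a comment marker, so
the JobHistoryAppender block is still emitted but inert; when enabled it
expands to an empty string. A small Python sketch of the same gating
(property names copied from the template; the helper itself is illustrative):

    def rca_properties(db_url, driver, user, password, enabled=True):
        # When disabled, every line gains a "#" prefix so log4j ignores it.
        prefix = "" if enabled else "#"
        lines = [
            "ambari.jobhistory.database=" + db_url,
            "ambari.jobhistory.driver=" + driver,
            "ambari.jobhistory.user=" + user,
            "ambari.jobhistory.password=" + password,
            "ambari.jobhistory.logger=DEBUG,JHA",
        ]
        return "\n".join(prefix + line for line in lines)
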
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb
deleted file mode 100644
index 3cd38b3..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb
+++ /dev/null
@@ -1,3 +0,0 @@
-<%h=scope.function_hdp_host("slave_hosts"); (h.kind_of?(Array) ? h : []).each do |host|-%>
-<%= host %>
-<%end-%>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
deleted file mode 100644
index 78fd75e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
+++ /dev/null
@@ -1,20 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-mapred.local.dir=<%=scope.function_hdp_template_var("mapred_local_dir")%>
-mapreduce.tasktracker.group=<%=scope.function_hdp_template_var("mapred_tt_group")%>
-hadoop.log.dir=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/<%=scope.function_hdp_template_var("mapred_user")%>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh b/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh
deleted file mode 100644
index 0003188..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-disable 'ambarismoketest'
-drop 'ambarismoketest'
-create 'ambarismoketest','family'
-put 'ambarismoketest','row01','family:col01','value1'
-scan 'ambarismoketest'
-exit

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmokeVerify.sh b/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmokeVerify.sh
deleted file mode 100644
index 39fe6e5..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmokeVerify.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-conf_dir=$1
-data=$2
-echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
-cat /tmp/hbase_chk_verify
-echo "Looking for $data"
-grep -q $data /tmp/hbase_chk_verify
-if [ "$?" -ne 0 ]
-then
-  exit 1
-fi
-
-grep -q '1 row(s)' /tmp/hbase_chk_verify
\ No newline at end of file

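Taken together, hbaseSmoke.sh writes a known value into an 'ambarismoketest'
table through the HBase shell, and hbaseSmokeVerify.sh re-scans the table and
greps for that value plus the "1 row(s)" summary. The verify half translates
to Python roughly as below (the subprocess approach and an on-PATH hbase
binary are assumptions):

    import subprocess

    def verify_hbase_smoke(conf_dir, expected):
        # Re-run the scan and check for both the expected cell value and
        # the "1 row(s)" marker, like hbaseSmokeVerify.sh above.
        out = subprocess.run(
            ["hbase", "--config", conf_dir, "shell"],
            input="scan 'ambarismoketest'\n",
            capture_output=True, text=True).stdout
        print(out)
        return expected in out and "1 row(s)" in out
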
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
deleted file mode 100644
index b0931df..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::client(
-  $service_state = $hdp::params::cluster_client_state,
-  $opts = {}
-)
-{
-  include hdp-hbase::params
-  $hbase_tmp_dir = $hdp-hbase::params::hbase_tmp_dir
-
-  #assumption is there are no other hbase components on node
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-    if (($hdp::params::service_exists['hdp-hbase::master'] != true) and ($hdp::params::service_exists['hdp-hbase::regionserver'] != true)) {
-      #adds package, users, directories, and common configs
-      class { 'hdp-hbase': 
-        type          => 'client',
-        service_state => $service_state
-      }
-
-      hdp::directory_recursive_create_ignore_failure { "${hbase_tmp_dir}/local/jars":
-        owner => $hdp-hbase::params::hbase_user,
-        context_tag => 'hbase_client',
-        service_state => $service_state,
-        force => true
-      }
-
-      Class[ 'hdp-hbase' ] -> Hdp::Directory_recursive_create_ignore_failure<||>
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp
deleted file mode 100644
index 6bad593..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp
+++ /dev/null
@@ -1,113 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::hbase::service_check() inherits hdp-hbase::params
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $security_enabled = $hdp::params::security_enabled
-  $conf_dir = $hdp::params::hbase_conf_dir
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-  $hbase_user = $hdp-hbase::params::hbase_user
-  $hbase_keytab = $hdp::params::hbase_user_keytab
-  $serviceCheckData = hdp_unique_id_and_date()
-  $kinit_cmd = "${hdp::params::kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user};"
-
-  anchor { 'hdp-hbase::hbase::service_check::begin':}
-
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2){
-    $output_file = "${hbase_hdfs_root_dir}/data/default/ambarismoketest"
-  } else {
-    $output_file = "${hbase_hdfs_root_dir}/ambarismoketest"
-  }
-
-  $test_cmd = "fs -test -e ${output_file}"
-
-  $hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
-
-  file { '/tmp/hbaseSmokeVerify.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-hbase/hbaseSmokeVerify.sh",
-    mode => '0755',
-  }
-
-  file { $hbase_servicecheck_file:
-    mode => '0755',
-    content => template('hdp-hbase/hbase-smoke.sh.erb'),
-  }
-  if ($security_enabled == true) {
-    $servicecheckcmd = "su - ${smoke_test_user} -c '$kinit_cmd hbase --config $conf_dir  shell $hbase_servicecheck_file'"
-    $smokeverifycmd = "su - ${smoke_test_user} -c '$kinit_cmd /tmp/hbaseSmokeVerify.sh $conf_dir ${serviceCheckData}'"
-  } else {
-    $servicecheckcmd = "su - ${smoke_test_user} -c 'hbase --config $conf_dir  shell $hbase_servicecheck_file'"
-    $smokeverifycmd = "su - ${smoke_test_user} -c '/tmp/hbaseSmokeVerify.sh $conf_dir ${serviceCheckData}'"
-  }
-
-  exec { $hbase_servicecheck_file:
-    command   => $servicecheckcmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-
-  exec { '/tmp/hbaseSmokeVerify.sh':
-    command   => $smokeverifycmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    notify    => Hdp-hadoop::Exec-hadoop['hbase::service_check::test'],
-    logoutput => "true"
-  }
-
-  hdp-hadoop::exec-hadoop { 'hbase::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Exec['/tmp/hbaseSmokeVerify.sh'],
-    before      => Anchor['hdp-hbase::hbase::service_check::end'] #TODO: remove after testing
-  }
-
-  if ($security_enabled == true) {
-    $hbase_grant_premissions_file = '/tmp/hbase_grant_permissions.sh'
-    $hbase_kinit_cmd = "${hdp::params::kinit_path_local} -kt ${hbase_keytab} ${hbase_user};"
-    $grantprivelegecmd = "$hbase_kinit_cmd hbase shell ${hbase_grant_premissions_file}"
-
-    file { $hbase_grant_premissions_file:
-      owner   => $hbase_user,
-      group   => $hdp::params::user_group,
-      mode => '0644',
-      content => template('hdp-hbase/hbase_grant_permissions.erb')
-      }
-      hdp-hadoop::exec-hadoop { '${smokeuser}_grant_privileges' :
-        command => $grantprivelegecmd,
-        require => File[$hbase_grant_premissions_file],
-        user => $hbase_user
-      }
-     Anchor['hdp-hbase::hbase::service_check::begin'] ->  File['/tmp/hbaseSmokeVerify.sh']
-       File[$hbase_servicecheck_file] ->  File[$hbase_grant_premissions_file] ->
-       Hdp-hadoop::Exec-hadoop['${smokeuser}_grant_privileges'] ->
-       Exec[$hbase_servicecheck_file] ->
-       Exec['/tmp/hbaseSmokeVerify.sh'] -> Anchor['hdp-hbase::hbase::service_check::end']
-  } else {
-    Anchor['hdp-hbase::hbase::service_check::begin'] ->  File['/tmp/hbaseSmokeVerify.sh']
-    File[$hbase_servicecheck_file] -> Exec[$hbase_servicecheck_file] -> Exec['/tmp/hbaseSmokeVerify.sh']
-    -> Anchor['hdp-hbase::hbase::service_check::end']
-  }
-  anchor{ 'hdp-hbase::hbase::service_check::end':}
-}
\ No newline at end of file

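Note how the service check assembles its shell commands: every command runs
as the smoke user via su, and on secure clusters it is first prefixed with a
kinit against the smoke user's keytab. A hedged sketch of that assembly (not
the Ambari API; argument names are illustrative):

    def smoke_command(script, smoke_user, security_enabled,
                      keytab=None, kinit="/usr/bin/kinit"):
        # Mirrors $servicecheckcmd above: optional kinit, then the check
        # script, all executed in a login shell as the smoke user.
        inner = ""
        if security_enabled:
            inner += "%s -kt %s %s; " % (kinit, keytab, smoke_user)
        inner += script
        return "su - %s -c '%s'" % (smoke_user, inner)
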
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp
deleted file mode 100644
index 384fff5..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp
+++ /dev/null
@@ -1,155 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase(
-  $type,
-  $service_state) 
-{
-  include hdp-hbase::params
- 
-  $hbase_user = $hdp-hbase::params::hbase_user
-  $config_dir = $hdp-hbase::params::conf_dir
-  
-  $hdp::params::component_exists['hdp-hbase'] = true
-  $smokeuser = $hdp::params::smokeuser
-  $security_enabled = $hdp::params::security_enabled
-
-  #Configs generation  
-
-  if has_key($configuration, 'hbase-site') {
-    configgenerator::configfile{'hbase-site': 
-      modulespath => $hdp-hbase::params::conf_dir,
-      filename => 'hbase-site.xml',
-      module => 'hdp-hbase',
-      configuration => $configuration['hbase-site'],
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hbase::params::conf_dir}/hbase-site.xml":
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'hdfs-site') {
-    configgenerator::configfile{'hdfs-site':
-      modulespath => $hdp-hbase::params::conf_dir,
-      filename => 'hdfs-site.xml',
-      module => 'hdp-hbase',
-      configuration => $configuration['hdfs-site'],
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hbase::params::conf_dir}/hdfs-site.xml":
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'hbase-policy') {
-    configgenerator::configfile{'hbase-policy': 
-      modulespath => $hdp-hbase::params::conf_dir,
-      filename => 'hbase-policy.xml',
-      module => 'hdp-hbase',
-      configuration => $configuration['hbase-policy'],
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hbase::params::conf_dir}/hbase-policy.xml":
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  anchor{'hdp-hbase::begin':}
-  anchor{'hdp-hbase::end':}
-
-  if ($service_state == 'uninstalled') {
-    hdp::package { 'hbase':
-      ensure => 'uninstalled'
-    }
-    hdp::directory { $config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    Anchor['hdp-hbase::begin'] -> Hdp::Package['hbase'] -> Hdp::Directory[$config_dir] -> Anchor['hdp-hbase::end']
-
-  } else {  
-    hdp::package { 'hbase': }
-  
-    hdp::directory { $config_dir: 
-      service_state => $service_state,
-      force => true,
-      owner => $hbase_user,
-      group => $hdp::params::user_group,
-      override_owner => true
-    }
-
-   hdp-hbase::configfile { ['hbase-env.sh',  $hdp-hbase::params::metric-prop-file-name ]: 
-      type => $type
-    }
-
-    hdp-hbase::configfile { 'regionservers':}
-
-    if ($security_enabled == true) {
-      if ($type == 'master' and $service_state == 'running') {
-        hdp-hbase::configfile { 'hbase_master_jaas.conf' : }
-      } elsif ($type == 'regionserver' and $service_state == 'running') {
-        hdp-hbase::configfile { 'hbase_regionserver_jaas.conf' : }
-      } elsif ($type == 'client') {
-        hdp-hbase::configfile { 'hbase_client_jaas.conf' : }
-      }
-    }
-    Anchor['hdp-hbase::begin'] -> Hdp::Package['hbase'] -> Hdp::Directory[$config_dir] ->
-    Hdp-hbase::Configfile<||> ->  Anchor['hdp-hbase::end']
-  }
-}
-
-### config files
-define hdp-hbase::configfile(
-  $mode = undef,
-  $hbase_master_hosts = undef,
-  $template_tag = undef,
-  $type = undef,
-  $conf_dir = $hdp-hbase::params::conf_dir
-) 
-{
-  if ($name == $hdp-hbase::params::metric-prop-file-name) {
-    if ($type == 'master') {
-      $tag = GANGLIA-MASTER
-    } else {
-      $tag = GANGLIA-RS
-    }
-  } else {
-    $tag = $template_tag
-  }
-
-  hdp::configfile { "${conf_dir}/${name}":
-    component         => 'hbase',
-    owner             => $hdp-hbase::params::hbase_user,
-    mode              => $mode,
-    hbase_master_hosts => $hbase_master_hosts,
-    template_tag      => $tag
-  }
-}
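
The hbase manifests above lean on the begin/end anchor idiom, which predates
Puppet's built-in "contain": pinning every resource between two anchors lets
other classes order themselves against the whole class rather than against
its individual resources. A minimal sketch of the pattern, with illustrative
names rather than anything from the Ambari modules:

    class example::service {
      anchor { 'example::service::begin': }

      package { 'example-pkg':
        ensure => installed,
      }

      anchor { 'example::service::end': }

      # The chain pins the package inside the anchor pair, so
      # Class['example::service'] can be ordered as a unit elsewhere.
      Anchor['example::service::begin'] -> Package['example-pkg'] ->
      Anchor['example::service::end']
    }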


[06/15] AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp b/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp
deleted file mode 100644
index 2d4711b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp
+++ /dev/null
@@ -1,223 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie::service(
-  $ensure,
-  $setup,
-  $initial_wait = undef
-)
-{
-  include hdp-oozie::params
-  
-  $user = "$hdp-oozie::params::oozie_user"
-  $hadoop_home = $hdp-oozie::params::hadoop_prefix
-  $oozie_tmp = $hdp-oozie::params::oozie_tmp_dir
-  $oozie_hdfs_user_dir = $hdp::params::oozie_hdfs_user_dir
-  $cmd = "env HADOOP_HOME=${hadoop_home} /usr/sbin/oozie_server.sh"
-  $pid_file = "${hdp-oozie::params::oozie_pid_dir}/oozie.pid" 
-  $jar_location = $hdp::params::hadoop_jar_location
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-    $ext_js_path = "/usr/share/HDP-oozie/ext-2.2.zip"
-  } else {
-    $ext_js_path = "/usr/share/HDP-oozie/ext.zip"
-  }
-  $oozie_libext_dir = "/usr/lib/oozie/libext"
-
-  $lzo_enabled = $hdp::params::lzo_enabled
-
-  $security = $hdp::params::security_enabled
-  $oozie_keytab = $hdp-oozie::params::oozie_service_keytab
-  $oozie_principal = $configuration['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-  
-  $oracle_driver_jar_name = "ojdbc6.jar"
-  $java_share_dir = "/usr/share/java"
-  
-  $artifact_dir = $hdp::params::artifact_dir
-  $driver_location = $hdp::params::jdk_location
-  $driver_curl_target = "${java_share_dir}/${oracle_driver_jar_name}"
-  $curl_cmd = "curl -kf --retry 10 ${driver_location}${oracle_driver_jar_name} -o ${driver_curl_target}"
-  
-  $jdbc_driver_name = $hdp::params::oozie_jdbc_driver
-  if ($jdbc_driver_name == "com.mysql.jdbc.Driver"){
-    $jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
-  } elsif($jdbc_driver_name == "oracle.jdbc.driver.OracleDriver") {
-      $jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
-  }
-  
-  file { '/tmp/wrap_ooziedb.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-oozie/wrap_ooziedb.sh",
-    mode => '0755'
-  }
-  
-
-  if ($security == true) {
-    $kinit_if_needed = "${hdp::params::kinit_path_local} -kt ${oozie_keytab} ${oozie_principal}"
-  } else {
-    $kinit_if_needed = "echo 0"
-  }
-  
-  if ($lzo_enabled == true) {
-    $lzo_jar_suffix = "/usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar"
-  } else {
-    $lzo_jar_suffix = undef
-  }
-
-  if (($lzo_enabled == true) or ($jdbc_driver_name != undef)){
-    $jar_option = "-jars"         
-  } else {
-    $jar_option = ""
-  }
-
-  if (($lzo_enabled != undef) and ($jdbc_driver_name != undef)){
-    $jar_path = "${lzo_jar_suffix}:${jdbc_driver_jar}"        
-  } else {
-    $jar_path = "${lzo_jar_suffix}${jdbc_driver_jar}"
-  }
-
-       
-  $cmd1 = "cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz"
-  $cmd2 =  "cd /usr/lib/oozie && mkdir -p ${oozie_tmp}"
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-    $cmd3 = $jdbc_driver_name ? {
-        /(com.mysql.jdbc.Driver|oracle.jdbc.driver.OracleDriver)/ => "cd /usr/lib/oozie && chown ${user}:${hdp::params::user_group} ${oozie_tmp} && mkdir -p ${oozie_libext_dir} && cp ${$ext_js_path} ${oozie_libext_dir} && cp ${$jdbc_driver_jar} ${oozie_libext_dir}",
-        default            => "cd /usr/lib/oozie && chown ${user}:${hdp::params::user_group} ${oozie_tmp} && mkdir -p ${oozie_libext_dir} && cp ${$ext_js_path} ${oozie_libext_dir}",
-    }
-  } else {
-    $cmd3 = $jdbc_driver_name ? {
-        /(com.mysql.jdbc.Driver|oracle.jdbc.driver.OracleDriver)/ => "cd /usr/lib/oozie && chown ${user}:${hdp::params::user_group} ${oozie_tmp} && mkdir -p ${oozie_libext_dir} && cp ${$jdbc_driver_jar} ${oozie_libext_dir}",
-        default            => "cd /usr/lib/oozie && chown ${user}:${hdp::params::user_group} ${oozie_tmp}",
-    }
-  }
-     
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-    $cmd4 = "cd ${oozie_tmp} && /usr/lib/oozie/bin/oozie-setup.sh prepare-war"
-  } else {
-    $cmd4 = "cd ${oozie_tmp} && /usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 ${jar_location} -extjs ${ext_js_path} ${jar_option} ${jar_path}"
-  }
-  $cmd5 =  "cd ${oozie_tmp} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; echo 0"
-  $cmd6 =  "su - ${user} -c '${kinit_if_needed}; hadoop dfs -put /usr/lib/oozie/share ${oozie_hdfs_user_dir} ; hadoop dfs -chmod -R 755 ${oozie_hdfs_user_dir}/share'"
-
-  if ($ensure == 'installed_and_configured') {
-    $sh_cmds = [$cmd1, $cmd2, $cmd3]
-    $user_cmds_on_install = [$cmd4]
-  } elsif ($ensure == 'running') {
-    $user_cmds_on_run = [$cmd5]   
-    $start_cmd = "su - ${user} -c  'cd ${oozie_tmp} && /usr/lib/oozie/bin/oozie-start.sh'"
-    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-    if ($jdbc_driver_name == "com.mysql.jdbc.Driver" or $jdbc_driver_name == "oracle.jdbc.driver.OracleDriver") {
-      $db_connection_check_command = "${hdp::params::java64_home}/bin/java -cp ${hdp::params::check_db_connection_jar}:${jdbc_driver_jar} org.apache.ambari.server.DBConnectionVerification ${hdp-oozie::params::oozie_jdbc_connection_url} ${hdp-oozie::params::oozie_metastore_user_name} ${hdp-oozie::params::oozie_metastore_user_passwd} ${jdbc_driver_name}"
-    } else {
-      $db_connection_check_command = undef
-    }
-  } elsif ($ensure == 'stopped') {
-    $stop_cmd  = "su - ${user} -c  'cd ${oozie_tmp} && /usr/lib/oozie/bin/oozie-stop.sh' && rm -f ${pid_file}"
-    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  } else {
-    $daemon_cmd = undef
-  }
-
-  hdp-oozie::service::directory { $hdp-oozie::params::oozie_pid_dir : }
-  hdp-oozie::service::directory { $hdp-oozie::params::oozie_log_dir : }
-  hdp-oozie::service::directory { $hdp-oozie::params::oozie_tmp_dir : }
-  hdp-oozie::service::directory { $hdp-oozie::params::oozie_data_dir : }
-  hdp-oozie::service::directory { $hdp-oozie::params::oozie_lib_dir : }
-  hdp-oozie::service::directory { $hdp-oozie::params::oozie_webapps_dir : }
-
-  anchor{'hdp-oozie::service::begin':} -> Hdp-oozie::Service::Directory<||> -> anchor{'hdp-oozie::service::end':}
-  
-  if ($ensure == 'installed_and_configured') {
-    hdp-oozie::service::exec_sh{$sh_cmds:}
-    hdp-oozie::service::exec_user{$user_cmds_on_install:}
-    Hdp-oozie::Service::Directory<||> -> Hdp-oozie::Service::Exec_sh[$cmd1] -> Hdp-oozie::Service::Exec_sh[$cmd2] ->Hdp-oozie::Service::Exec_sh[$cmd3] -> Hdp-oozie::Service::Exec_user[$cmd4] -> Anchor['hdp-oozie::service::end']
-  } elsif ($ensure == 'running') {
-    hdp-oozie::service::exec_user{$user_cmds_on_run:}
-    hdp::exec { "exec $cmd6" :
-      command => $cmd6,
-      unless => "${kinit_if_needed}; hadoop dfs -ls /user/oozie/share | awk 'BEGIN {count=0;} /share/ {count++} END {if (count > 0) {exit 0} else {exit 1}}'"
-    }
-    hdp::exec { "exec $start_cmd":
-      command => $start_cmd,
-      unless  => $no_op_test,
-      initial_wait => $initial_wait,
-      require => Exec["exec $cmd6"]
-    }
-
-    if ($db_connection_check_command != undef) {
-      hdp::exec { "DB connection check $db_connection_check_command" :
-        command => $db_connection_check_command,
-        path    => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-      }
-
-      Hdp-oozie::Service::Directory<||> -> Hdp::Exec["DB connection check $db_connection_check_command"] -> Hdp-oozie::Service::Exec_user[$cmd5] -> Hdp::Exec["exec $cmd6"] -> Hdp::Exec["exec $start_cmd"] -> Anchor['hdp-oozie::service::end']
-    } else {
-      Hdp-oozie::Service::Directory<||> -> Hdp-oozie::Service::Exec_user[$cmd5] -> Hdp::Exec["exec $cmd6"] -> Hdp::Exec["exec $start_cmd"] -> Anchor['hdp-oozie::service::end']
-    }
-  } elsif ($ensure == 'stopped') {
-    hdp::exec { "exec $stop_cmd":
-      command => $stop_cmd,
-      onlyif  => $no_op_test,
-      initial_wait => $initial_wait
-   }
-  }
-}
-
-define hdp-oozie::service::directory()
-{
-  hdp::directory_recursive_create { $name: 
-    owner => $hdp-oozie::params::oozie_user,
-    mode => '0755',
-    service_state => $hdp-oozie::service::ensure,
-    force => true
-  }
-}
-define hdp-oozie::service::createsymlinks()
-{
-  hdp::exec { '/usr/lib/oozie/oozie-server/lib/mapred-site.xml':
-    command => "ln -sf /etc/hadoop/conf/mapred-site.xml /usr/lib/oozie/oozie-server/lib/mapred-site.xml",
-    unless => "test -e /usr/lib/oozie/oozie-server/lib/mapred-site.xml"
-  }
-}
-
-define hdp-oozie::service::exec_sh()
-{
-  $no_op_test = "ls ${hdp-oozie::service::pid_file} >/dev/null 2>&1 && ps `cat ${hdp-oozie::service::pid_file}` >/dev/null 2>&1"
-  hdp::exec { "exec $name":
-    command => "/bin/sh -c '$name'",
-    unless  => $no_op_test,
-    initial_wait => $hdp-oozie::service::initial_wait
-  }
-}
-
-define hdp-oozie::service::exec_user()
-{
-  $no_op_test = "ls ${hdp-oozie::service::pid_file} >/dev/null 2>&1 && ps `cat ${hdp-oozie::service::pid_file}` >/dev/null 2>&1"
-  hdp::exec { "exec $name":
-    command => "su - ${hdp-oozie::service::user} -c '$name'",
-    unless  => $no_op_test,
-    initial_wait => $hdp-oozie::service::initial_wait
-  }
-}
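
Every shell command in the service class above is wrapped in an exec guarded
by a pid-file probe, which is what makes repeated Puppet runs idempotent:
"unless" skips the start when the daemon is already up, and "onlyif" runs the
stop only when it is. A reduced sketch of that guard pattern (paths and names
are illustrative):

    $pid_file   = '/var/run/example/example.pid'
    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"

    exec { 'start-example':
      command => '/usr/lib/example/bin/start.sh',
      unless  => $no_op_test,   # no-op when the daemon is already running
      path    => '/usr/sbin:/sbin:/usr/bin:/bin',
    }

    exec { 'stop-example':
      command => '/usr/lib/example/bin/stop.sh',
      onlyif  => $no_op_test,   # only act when the daemon is running
      path    => '/usr/sbin:/sbin:/usr/bin:/bin',
    }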

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-env.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-env.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-env.sh.erb
deleted file mode 100644
index bb2ba72..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-env.sh.erb
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#      http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#Set JAVA HOME
-export JAVA_HOME=<%=scope.function_hdp_template_var("::hdp::params::java64_home")%>
-
-# Set Oozie specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs Oozie
-# Java System properties for Oozie should be specified in this variable
-#
-# export CATALINA_OPTS=
-
-# Oozie configuration file to load from Oozie configuration directory
-#
-# export OOZIE_CONFIG_FILE=oozie-site.xml
-
-# Oozie logs directory
-#
-export OOZIE_LOG=<%=scope.function_hdp_template_var("oozie_log_dir")%>/
-
-# Oozie pid directory
-#
-export CATALINA_PID=<%=scope.function_hdp_template_var("oozie_pid_file")%>
-
-#Location of the data for oozie
-export OOZIE_DATA=<%=scope.function_hdp_template_var("oozie_data_dir")%>/
-
-# Oozie Log4J configuration file to load from Oozie configuration directory
-#
-# export OOZIE_LOG4J_FILE=oozie-log4j.properties
-
-# Reload interval of the Log4J configuration file, in seconds
-#
-# export OOZIE_LOG4J_RELOAD=10
-
-# The port Oozie server runs
-#
-# export OOZIE_HTTP_PORT=11000
-
-# The host name Oozie server runs on
-#
-# export OOZIE_HTTP_HOSTNAME=`hostname -f`
-
-# The base URL for callback URLs to Oozie
-#
-# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64
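
Templates like this env file read manifest variables through the custom
hdp_template_var parser function. In stock Puppet the same wiring is a file
resource whose content comes from template(), with the ERB side calling
scope.lookupvar; a minimal sketch under those assumptions (names and the JDK
path are illustrative):

    # Manifest side: render the env file from a module template.
    $java64_home = '/usr/jdk64/jdk1.6.0_31'   # illustrative value

    file { '/etc/example/conf/example-env.sh':
      ensure  => file,
      mode    => '0644',
      content => template('example/example-env.sh.erb'),
    }

    # Template side (example-env.sh.erb) would then contain:
    #   export JAVA_HOME=<%= scope.lookupvar('java64_home') %>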

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-log4j.properties.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-log4j.properties.erb b/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-log4j.properties.erb
deleted file mode 100644
index e4a2662..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-log4j.properties.erb
+++ /dev/null
@@ -1,74 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. See accompanying LICENSE file.
-#
-
-# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
-# XLogService sets its value to '${oozie.home}/logs'
-
-log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
-log4j.appender.oozie.Append=true
-log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
-log4j.appender.oozieops.Append=true
-log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
-log4j.appender.oozieinstrumentation.Append=true
-log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
-log4j.appender.oozieaudit.Append=true
-log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
-log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
-log4j.appender.openjpa.Append=true
-log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
-log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.logger.openjpa=INFO, openjpa
-log4j.logger.oozieops=INFO, oozieops
-log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
-log4j.logger.oozieaudit=ALL, oozieaudit
-log4j.logger.org.apache.oozie=INFO, oozie
-log4j.logger.org.apache.hadoop=WARN, oozie
-log4j.logger.org.mortbay=WARN, oozie
-log4j.logger.org.hsqldb=WARN, oozie
-log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-pig/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-pig/files/pigSmoke.sh b/ambari-agent/src/main/puppet/modules/hdp-pig/files/pigSmoke.sh
deleted file mode 100644
index a22456e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-pig/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-/*Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License */
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/init.pp
deleted file mode 100644
index 902d967..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/init.pp
+++ /dev/null
@@ -1,76 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-pig(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp-pig::params
-{  
-  $pig_config_dir = $hdp-pig::params::pig_conf_dir
- 
-  if ($hdp::params::use_32_bits_on_slaves == false) {
-    $size = 64
-  } else {
-    $size = 32
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state == 'uninstalled') {
-    hdp::package { 'pig' :
-      ensure => 'uninstalled',
-      size   => $size
-    }
-    hdp::directory_recursive_create { $pig_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-   anchor { 'hdp-pig::begin': } -> Hdp::Package['pig'] -> Hdp::Directory_recursive_create[$pig_conf_dir] -> anchor { 'hdp-pig::end': }
-
-  } elsif ($service_state == 'installed_and_configured') {
-    hdp::package { 'pig' : 
-      size => $size
-    }
-
-    hdp::directory { $pig_config_dir:
-      service_state => $service_state,
-      force => true,
-      owner => $hdp::params::hdfs_user,
-      group => $hdp::params::user_group,
-      override_owner => true
-    }
-
-    hdp-pig::configfile { ['pig-env.sh','pig.properties','log4j.properties']:}
-  
-    anchor { 'hdp-pig::begin': } -> Hdp::Package['pig'] -> Hdp::Directory[$pig_conf_dir] -> Hdp-pig::Configfile<||> -> anchor { 'hdp-pig::end': }
- } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-### config files
-define hdp-pig::configfile()
-{
-  hdp::configfile { "${hdp::params::pig_conf_dir}/${name}":
-    component => 'pig',
-    owner => $hdp::params::hdfs_user
-  }
-}
-
-
-
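
The pig class above (like the sqoop class later in this message) picks a 32-
or 64-bit package flavour from use_32_bits_on_slaves and hands it to the
hdp::package define. That define lives outside this excerpt, so a plain
package resource with an illustrative name mapping stands in for it here:

    $size = $hdp::params::use_32_bits_on_slaves ? {
      true    => 32,
      default => 64,
    }

    package { "example-pig.${size}bit":
      ensure => installed,
    }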

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/params.pp
deleted file mode 100644
index cd94408..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/params.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-pig::params() inherits hdp::params
-{
-  $pig_conf_dir = $hdp::params::pig_conf_dir
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/pig/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/pig/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/pig/service_check.pp
deleted file mode 100644
index a56739a..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/pig/service_check.pp
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-pig::pig::service_check() 
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $input_file = 'passwd'
-  $output_file = "pigsmoke.out"
-
-  $cleanup_cmd = "dfs -rmr ${output_file} ${input_file}"
-  #cleanup put below to handle retries; if retrying there will be a stale file that needs cleanup; exit code is a function of the second command
-  $create_file_cmd = "${cleanup_cmd}; hadoop dfs -put /etc/passwd ${input_file} " #TODO: inconsistent that second command needs hadoop
-  $test_cmd = "fs -test -e ${output_file}" 
-  
-  anchor { 'hdp-pig::pig::service_check::begin':}
-
-
-  hdp-hadoop::exec-hadoop { 'pig::service_check::create_file':
-    command   => $create_file_cmd,
-    tries     => 3,
-    try_sleep => 5,
-    require   => Anchor['hdp-pig::pig::service_check::begin'],
-    notify    => File['/tmp/pigSmoke.sh'],
-    user      => $smoke_test_user
-  }
-
-  file { '/tmp/pigSmoke.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-pig/pigSmoke.sh",
-    mode => '0755',
-    require     => Hdp-hadoop::Exec-hadoop['pig::service_check::create_file']
-  }
-
-  exec { '/tmp/pigSmoke.sh':
-    command   => "su - ${smoke_test_user} -c 'pig /tmp/pigSmoke.sh'",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/pigSmoke.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    notify    => Hdp-hadoop::Exec-hadoop['pig::service_check::test'],
-    logoutput => "true"
-  }
-
-  hdp-hadoop::exec-hadoop { 'pig::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Exec['/tmp/pigSmoke.sh'],
-    before      => Anchor['hdp-pig::pig::service_check::end'], #TODO: remove after testing
-    user      => $smoke_test_user
-  }
-  
-  anchor{ 'hdp-pig::pig::service_check::end':}
-}
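
The pig service check chains its stages through notify/refreshonly: the
final HDFS "fs -test -e" verification fires only when the smoke-test exec
actually ran in this catalog. A stripped-down sketch of that refresh
relationship (commands and names illustrative):

    exec { 'run-smoke':
      command => '/bin/true',
      path    => '/bin:/usr/bin',
      notify  => Exec['verify-smoke'],
    }

    exec { 'verify-smoke':
      command     => '/usr/bin/test -e /tmp/smoke.out',
      path        => '/bin:/usr/bin',
      refreshonly => true,   # runs only when 'run-smoke' signals it
    }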

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-pig/templates/log4j.properties.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-pig/templates/log4j.properties.erb b/ambari-agent/src/main/puppet/modules/hdp-pig/templates/log4j.properties.erb
deleted file mode 100644
index 9ef6e2c..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-pig/templates/log4j.properties.erb
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# ***** Set root logger level to DEBUG and its only appender to A.
-log4j.logger.org.apache.pig=info, A
-
-# ***** A is set to be a ConsoleAppender.
-log4j.appender.A=org.apache.log4j.ConsoleAppender
-# ***** A uses PatternLayout.
-log4j.appender.A.layout=org.apache.log4j.PatternLayout
-log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig-env.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig-env.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig-env.sh.erb
deleted file mode 100644
index a69f817..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig-env.sh.erb
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME=<%=scope.function_hdp_template_var("::hdp::params::java64_home")%>
-HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp::params::hadoop_home")%>}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig.properties.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig.properties.erb b/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig.properties.erb
deleted file mode 100644
index 6fcb233..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig.properties.erb
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Pig configuration file. All values can be overwritten by command line arguments.
-
-# log4jconf log4j configuration file
-# log4jconf=./conf/log4j.properties
-
-# a file that contains pig script
-#file=
-
-# load jarfile, colon separated
-#jar=
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-#verbose=true
-
-#exectype local|mapreduce, mapreduce is default
-#exectype=local
-
-#pig.logfile=
-
-#Do not spill temp files smaller than this size (bytes)
-#pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-#pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-#pig.exec.reducers.bytes.per.reducer=1000000000
-#pig.exec.reducers.max=999
-
-#Use this option only when your Pig job will otherwise die because of
-#using more counter than hadoop configured limit
-#pig.disable.counter=true
-hcat.bin=/usr/bin/hcat

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/init.pp
deleted file mode 100644
index e005cab..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/init.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-repos() {}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/process_repo.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/process_repo.pp b/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/process_repo.pp
deleted file mode 100644
index 31ab192..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/process_repo.pp
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-repos::process_repo(
-  $os_type,
-  $repo_id,
-  $base_url,
-  $mirror_list,
-  $repo_name
-) inherits hdp-hadoop::params
-{
-  debug("Getting repo path for os: $hdp_os_type")
-
-  $repo_path = $repos_paths[$hdp_os_type]
-
-  if hdp_is_empty($repo_path) {
-    hdp_fail("There is no repo path for os: $hdp_os_type in hdp::params")
-  }
-
-  file{$repo_name:
-    path => "$repo_path/$repo_name.repo",
-    ensure => file,
-    content => template("hdp-repos/repo.erb")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-repos/templates/repo.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-repos/templates/repo.erb b/ambari-agent/src/main/puppet/modules/hdp-repos/templates/repo.erb
deleted file mode 100644
index a5edc55..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-repos/templates/repo.erb
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-[<%=repo_id%>]
-name=<%=repo_name %>
-<%if scope.function_hdp_is_empty(base_url)%>mirrorlist=<%=mirror_list %><% else %>baseurl=<%=base_url %><% end %>
-path=/
-enabled=1
-gpgcheck=0
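
The process_repo define plus repo.erb hand-roll what Puppet's built-in
yumrepo type provides out of the box: one repository definition carrying
either a baseurl or a mirrorlist. A sketch of the built-in equivalent (the
repo id and URL are placeholders, not Ambari values):

    yumrepo { 'EXAMPLE-1.0':
      descr    => 'Example repository',
      baseurl  => 'http://repo.example.com/centos6/1.x/GA/',
      enabled  => 1,
      gpgcheck => 0,
    }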

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/init.pp
deleted file mode 100644
index cd5efe1..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/init.pp
+++ /dev/null
@@ -1,104 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-sqoop(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp-sqoop::params
-{
-  if ($hdp::params::use_32_bits_on_slaves == false) {
-    $size = 64
-  } else {
-    $size = 32
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state == 'uninstalled') {
-    hdp::package { 'sqoop' :
-      ensure => 'uninstalled',
-      size   => $size
-    }
-  } elsif ($service_state == 'installed_and_configured') {
-
-    hdp::package { 'sqoop' :
-      size => $size
-    }
-    class { 'hdp-sqoop::mysql-connector': }
-    if ($package_type == 'hdp') {
-      hdp-sqoop::createsymlinks { ['/usr/lib/sqoop/conf']:}
-    }
-
-    hdp::directory { $conf_dir:
-      service_state => $service_state,
-      force => true,
-      owner => $sqoop_user,
-      group => $hdp::params::user_group,
-      override_owner => true
-    }
-
-    hdp-sqoop::configfile { ['sqoop-env.sh']:}
-
-    hdp-sqoop::ownership { 'ownership': }
-
-    anchor { 'hdp-sqoop::begin': } -> Hdp::Package['sqoop'] -> Class['hdp-sqoop::mysql-connector'] -> Hdp::Directory[$conf_dir] -> Hdp-sqoop::Configfile<||> -> Hdp-sqoop::Ownership['ownership'] -> anchor { 'hdp-sqoop::end': }
- } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-
-define hdp-sqoop::createsymlinks()
-{
-  file { '/usr/lib/sqoop/conf' :
-    #ensure => directory,
-    ensure => link,
-    target => "/etc/sqoop"
-  }
-
-  file { '/etc/default/hadoop' :
-    ensure => link,
-    target => "/usr/bin/hadoop"
-  }
-}
-
-### config files
-define hdp-sqoop::configfile()
-{
-  hdp::configfile { "${hdp::params::sqoop_conf_dir}/${name}":
-    component => 'sqoop',
-    owner     => $hdp::params::sqoop_user
-  }
-}
-
-define hdp-sqoop::ownership {
-  file { "${hdp::params::sqoop_conf_dir}/sqoop-env-template.sh":
-    owner => $hdp::params::sqoop_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp::params::sqoop_conf_dir}/sqoop-site-template.xml":
-    owner => $hdp::params::sqoop_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp::params::sqoop_conf_dir}/sqoop-site.xml":
-    owner => $hdp::params::sqoop_user,
-    group => $hdp::params::user_group
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/mysql-connector.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/mysql-connector.pp b/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/mysql-connector.pp
deleted file mode 100644
index 12a3971..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/mysql-connector.pp
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-sqoop::mysql-connector()
-{
-  include hdp-sqoop::params
-  include hdp-hive::params
-
-  $target = "${hdp::params::artifact_dir}/${zip_name}"
-  $sqoop_lib = $hdp-sqoop::params::sqoop_lib
-
-  anchor { 'hdp-sqoop::mysql-connector::begin':}
-
-  hdp::package { 'mysql-connector-java' :
-    require   => Anchor['hdp-sqoop::mysql-connector::begin']
-  }
-
-   file { "${sqoop_lib}/mysql-connector-java.jar" :
-       ensure => link,
-       target => "/usr/share/java/mysql-connector-java.jar",
-       require => Hdp::Package['mysql-connector-java'],
-       notify  =>  Anchor['hdp-sqoop::mysql-connector::end'],
-   }
-
-   anchor { 'hdp-sqoop::mysql-connector::end':}
-  
-}
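
Getting the JDBC driver onto sqoop's classpath is done with a managed
symlink against the distribution jar, ordered after the package that ships
it. The same pattern in isolation (paths illustrative):

    package { 'mysql-connector-java':
      ensure => installed,
    }

    file { '/usr/lib/example/lib/mysql-connector-java.jar':
      ensure  => link,
      target  => '/usr/share/java/mysql-connector-java.jar',
      require => Package['mysql-connector-java'],
    }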

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/params.pp
deleted file mode 100644
index 03097c1..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/params.pp
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-sqoop::params() inherits hdp::params
-{
-  $conf_dir = $hdp::params::sqoop_conf_dir
-
-  $hbase_home = hdp_default("hbase_home","/usr")
-  $hive_home = hdp_default("hive_home","/usr")
-  $zoo_conf_dir = $hdp::params::zk_conf_dir 
-  $sqoop_lib = hdp_default("sqoop_lib","/usr/lib/sqoop/lib/") #TODO: should I remove and just use sqoop_dbroot
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/sqoop/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/sqoop/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/sqoop/service_check.pp
deleted file mode 100644
index 71c9c8f..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/sqoop/service_check.pp
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-sqoop::sqoop::service_check() 
-{
-  include hdp-sqoop::params
-  $smoke_test_user = $hdp::params::smokeuser
-
-  # TODO:SUHAS Move this to hdp::params
-  $security_enabled=$hdp::params::security_enabled
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-  if ($security_enabled == true) {
-    $smoke_user_kinitcmd="${hdp::params::kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
-  } else {
-    $smoke_user_kinitcmd=""
-  }
-
-  $cmd = "${smoke_user_kinitcmd}su - ${smoke_test_user} -c 'sqoop version'"
-  
-  anchor { 'hdp-sqoop::sqoop::service_check::begin':}
-
-  exec { 'sqoop_smoke':
-    command   => $cmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true",
-    require   => Anchor['hdp-sqoop::sqoop::service_check::begin'],
-    before    => Anchor['hdp-sqoop::sqoop::service_check::end']
-  }
-
-  anchor{ 'hdp-sqoop::sqoop::service_check::end':}
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-sqoop/templates/sqoop-env.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-sqoop/templates/sqoop-env.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-sqoop/templates/sqoop-env.sh.erb
deleted file mode 100644
index 90cbc75..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-sqoop/templates/sqoop-env.sh.erb
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# included in all the hadoop scripts with source command
-# should not be executable directly
-# also should not be passed any arguments, since we need original $*
-
-# Set Hadoop-specific environment variables here.
-
-#Set path to where bin/hadoop is available
-export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
-
-#set the path to where bin/hbase is available
-export HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}
-
-#Set the path to where bin/hive is available
-export HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}
-
-#Set the path for where zookeper config dir is
-export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
-
-# add libthrift in hive to sqoop class path first so hive imports work
-export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-templeton/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-templeton/files/templetonSmoke.sh b/ambari-agent/src/main/puppet/modules/hdp-templeton/files/templetonSmoke.sh
deleted file mode 100644
index cefc4f0..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-templeton/files/templetonSmoke.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export ttonhost=$1
-export smoke_test_user=$2
-export smoke_user_keytab=$3
-export security_enabled=$4
-export kinit_path_local=$5
-export ttonurl="http://${ttonhost}:50111/templeton/v1"
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else
-  kinitcmd=""
-fi
-
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
-
-#try hcat ddl command
-echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit  1
-fi
-
-# NOT SURE?? SUHAS
-if [[ $security_enabled == "true" ]]; then
-  echo "Templeton Pig Smoke Tests not run in secure mode"
-  exit 0
-fi
-
-#try pig query
-outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
-ttonTestOutput="/tmp/idtest.${outname}.out";
-ttonTestInput="/tmp/idtest.${outname}.in";
-ttonTestScript="idtest.${outname}.pig"
-
-echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
-echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
-echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
-
-#copy pig script to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
-
-#copy input file to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
-
-#create, copy post args file
-echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
-
-#submit pig query
-cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
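
Smoke scripts such as this one are shipped from a module's files/ directory
and driven by an exec with retries, mirroring the service checks earlier in
this patch. A compact sketch of that wiring (the host name, user, and script
name are illustrative):

    file { '/tmp/exampleSmoke.sh':
      ensure => file,
      source => 'puppet:///modules/example/exampleSmoke.sh',
      mode   => '0755',
    }

    exec { 'example-smoke':
      command   => "su - ambari-qa -c '/tmp/exampleSmoke.sh host.example.com'",
      tries     => 3,        # retry transient REST failures
      try_sleep => 5,
      path      => '/usr/sbin:/sbin:/usr/bin:/bin',
      logoutput => true,
      require   => File['/tmp/exampleSmoke.sh'],
    }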

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/client.pp b/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/client.pp
deleted file mode 100644
index 38814e8..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/client.pp
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::client(
-  $service_state = $hdp::params::cluster_client_state,
-  $templeton_server = undef
-) inherits hdp::params
-{ 
-  if ($service_state == 'no_op') {
-   } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-     if ($hdp::params::service_exists['hdp-templeton::server'] != true) {
-       #installs package, creates user, sets configuration
-       class { 'hdp-templeton' :
-         service_state => $service_state
-       }
-      if ($templeton_server != undef) {
-        Hdp-Templeton::Configfile<||>{templeton_server => $templeton_server}
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-hive-tar.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-hive-tar.pp b/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-hive-tar.pp
deleted file mode 100644
index cc0754c..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-hive-tar.pp
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::download-hive-tar()
-{
-  include hdp-templeton::params
-
-  $src_tar_name = $hdp-templeton::params::src_hive_tar_name
-  $dest_tar_name = $hdp-templeton::params::dest_hive_tar_name
-  $target = "${hdp::params::artifact_dir}/${dest_tar_name}"
- 
-  anchor { 'hdp-templeton::download-hive-tar::begin':}         
-
-   hdp::package { 'webhcat-tar-hive' :
-     require   => Anchor['hdp-templeton::download-hive-tar::begin']                                                              
-   }
-  
-#   hdp::exec { 'hive mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}':
-#       command => "mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}",
-#       unless  => "test -f ${target}",
-#       creates => $target,
-#       path    => ["/bin","/usr/bin/"],
-#       require => Hdp::Package['webhcat-tar-hive'],
-#       notify  =>  Anchor['hdp-templeton::download-hive-tar::end'],
-#   }
-
-   anchor { 'hdp-templeton::download-hive-tar::end':}       
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-pig-tar.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-pig-tar.pp b/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-pig-tar.pp
deleted file mode 100644
index 471ed6e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-pig-tar.pp
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::download-pig-tar()
-{
-  include hdp-templeton::params
-
-  $src_tar_name = $hdp-templeton::params::src_pig_tar_name
-  $dest_tar_name = $hdp-templeton::params::dest_pig_tar_name
-  $target = "${hdp::params::artifact_dir}/${dest_tar_name}"
-
-  anchor { 'hdp-templeton::download-pig-tar::begin':}
-
-   hdp::package { 'webhcat-tar-pig' :
-     require   => Anchor['hdp-templeton::download-pig-tar::begin']
-   }
-
-#   hdp::exec { 'pig ; mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}':
-#       command => "mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}",
-#       unless  => "test -f ${target}",
-#       creates => $target,
-#       path    => ["/bin","/usr/bin/"],
-#       require => Hdp::Package['webhcat-tar-pig'],
-#       notify  =>  Anchor['hdp-templeton::download-pig-tar::end'],
-#   }
-
-   anchor { 'hdp-templeton::download-pig-tar::end':}
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp
deleted file mode 100644
index 2bf5990..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp
+++ /dev/null
@@ -1,103 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton(
-  $service_state = undef,
-  $server = false
-)
-{
-  include hdp-templeton::params
-  # Configs generation
-  $webhcat_user = $hdp-templeton::params::webhcat_user
-  $templeton_config_dir = $hdp-templeton::params::conf_dir
-
-  if has_key($configuration, 'webhcat-site') {
-    configgenerator::configfile{'webhcat-site': 
-      modulespath => $templeton_config_dir,
-      filename => 'webhcat-site.xml',
-      module => 'hdp-templeton',
-      configuration => $configuration['webhcat-site'],
-      owner => $webhcat_user,
-      group => $hdp::params::user_group
-    }
-  } else {
-    file { "${templeton_config_dir}/webhcat-site.xml":
-      owner => $webhcat_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-
-
-  if ($hdp::params::use_32_bits_on_slaves == false) {
-    $size = 64
-  } else {
-    $size = 32
-  }
-
-  if ($service_state == 'uninstalled') {
-      hdp::package { 'webhcat' :
-      size => $size,
-      ensure => 'uninstalled'
-    }
-      hdp::directory { $templeton_config_dir:
-        service_state => $service_state,
-        force => true
-      }
-
-     anchor { 'hdp-templeton::begin': } -> Hdp::Package['webhcat'] -> Hdp::Directory[$templeton_config_dir] ->  anchor { 'hdp-templeton::end': }
-
-  } else {
-    hdp::package { 'webhcat' :
-      size => $size
-    }
-    class { hdp-templeton::download-hive-tar: }
-    class { hdp-templeton::download-pig-tar: }
-
-    hdp::directory { $templeton_config_dir: 
-      service_state => $service_state,
-      force => true,
-      owner => $webhcat_user,
-      group => $hdp::params::user_group,
-      override_owner => true
-    }
-
-    hdp-templeton::configfile { ['webhcat-env.sh']: }
-
-    anchor { 'hdp-templeton::begin': } -> Hdp::Package['webhcat'] -> Hdp::Directory[$templeton_config_dir] -> Hdp-templeton::Configfile<||> ->  anchor { 'hdp-templeton::end': }
-
-     if ($server == true ) { 
-      Hdp::Package['webhcat'] -> Class['hdp-templeton::download-hive-tar'] -> Class['hdp-templeton::download-pig-tar'] -> Anchor['hdp-templeton::end']
-     }
-  }
-}
-
-### config files
-define hdp-templeton::configfile(
-  $mode = undef
-) 
-{
-  hdp::configfile { "${hdp-templeton::params::conf_dir}/${name}":
-    component       => 'templeton',
-    owner           => $hdp-templeton::params::webhcat_user,
-    mode            => $mode
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/params.pp
deleted file mode 100644
index a40cb5c..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/params.pp
+++ /dev/null
@@ -1,60 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::params() inherits hdp::params
-{
-  $templeton_user = $hdp::params::templeton_user
-  $webhcat_user = $hdp::params::webhcat_user
-  ###pig and hive tar url connector
-  $download_url = $hdp::params::apache_artifacts_download_url
-
-  $dest_pig_tar_name = hdp_default("dest_pig_tar_name","pig.tar.gz")
-  $dest_hive_tar_name = hdp_default("dest_hive_tar_name","hive.tar.gz")
-  $src_pig_tar_name = hdp_default("src_pig_tar_name","pig.tar.gz")
-  $src_hive_tar_name = hdp_default("src_hive_tar_name","hive.tar.gz")
-
-  ### templeton-env
-  $conf_dir = hdp_default("conf_dir","/etc/hcatalog/conf")
-
-  ### templeton-env
-  $templeton_log_dir = hdp_default("hcat_log_dir","/var/log/webhcat")
-
-  $templeton_pid_dir = hdp_default("hcat_pid_dir","/var/run/webhcat")
-
-#  $templeton_jar_name= hdp_default("templeton_jar_name","templeton-0.1.4.14.jar")
- 
-#  $hadoop_prefix = hdp_default("hadoop_prefix","/usr")
-#  $hive_prefix = hdp_default("hive_prefix","/usr")
-  
-  ### templeton-site
-  $hadoop_conf_dir = hdp_default("webhcat-site/templeton.hadoop.conf.dir")
-  $templeton_jar = hdp_default("webhcat-site/templeton.jar","/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar")
-  $zookeeper_jar = hdp_default("webhcat-site/zookeeper.jar","/usr/lib/zookeeper/zookeeper.jar")
-  $pig_tar_gz = hdp_default("webhcat-site/pig.tar.gz","$dest_pig_tar_name")
-  $pig_tar_name_hdfs = hdp_default("webhcat-site/pig.tar.name.hdfs","pig-0.9.2.14")
-
-  $hive_tar_gz = hdp_default("webhcat-site/hive.tar.gz","$dest_hive_tar_name")
-  $hive_tar_gz_name = hdp_default("webhcat-site/hive.tar.gz.name","hive-0.9.0.14")
-  $hive_metastore_sasl_enabled = hdp_default("webhcat-site/hive.metastore.sasl.enabled",false)
-
-  $templeton_metastore_principal = hdp_default("webhcat-site/templeton.metastore.principal")
-  $keytab_path = $hdp::params::keytab_path
-  
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp b/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp
deleted file mode 100644
index 82a225b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp
+++ /dev/null
@@ -1,134 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits  hdp-templeton::params
-{  
-
-  $templeton_user = $hdp-templeton::params::templeton_user
-  if ($service_state == 'no_op') { 
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-  $hdp::params::service_exists['hdp-templeton::server'] = true
-
-  if ( ($service_state == 'installed_and_configured') and
-       ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-     $masterHost = $kerberos_adminclient_host[0]
-     hdp::download_keytab { 'templeton_headless_keytab' :
-       masterhost => $masterHost,
-       keytabdst => "${$keytab_path}/templeton.headless.keytab",
-       keytabfile => 'templeton.headless.keytab',
-       owner => $hdp::params::templeton_user,
-       hostnameInPrincipals => 'no' 
-     }
-
-     if ( ($hdp::params::service_exists['hdp-hadoop::namenode'] != true) and
-          ($hdp::params::service_exists['hdp-hadoop::snamenode'] != true) and
-          ($hdp::params::service_exists['hdp-oozie::server'] != true) ) {
-       hdp::download_keytab { 'templeton_spnego_service_keytab' :
-         masterhost => $masterHost,
-         keytabdst => "${$keytab_path}/spnego.service.keytab",
-         keytabfile => 'spnego.service.keytab',
-         owner => $hdp::params::templeton_user,
-         group => $hdp::params::user_group,
-         mode => '0440'
-       }
-     }
-  }
-
-  class{ 'hdp-templeton' :
-    service_state => $service_state,
-    server        => true
-  }
-
-  class { 'hdp-templeton::copy-hdfs-directories' :
-    service_state => $service_state
-  }
-
-  class { 'hdp-templeton::service' :
-    ensure       => $service_state,
-  }
-  
-  #top level does not need anchors
-  Class['hdp-templeton'] -> Class['hdp-templeton::copy-hdfs-directories'] -> Class['hdp-templeton::service']
-  } else { 
-  hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-class hdp-templeton::copy-hdfs-directories($service_state)
-{
- $streaming_jar =  hdp_get_dir_from_url(hdp_default("webhcat-site/templeton.streaming.jar",""), "/apps/webhcat/hadoop-streaming.jar") 
- $pig_archiv =  hdp_get_dir_from_url(hdp_default("webhcat-site/templeton.pig.archive",""), "/apps/webhcat/pig.tar.gz")
- $templeton_hive_archive =  hdp_get_dir_from_url(hdp_default("webhcat-site/templeton.hive.archive",""), "/apps/webhcat/hive.tar.gz")
- $webhcat_user = $hdp::params::webhcat_user
- $smoke_test_user = $hdp::params::smokeuser
- $smokeuser_keytab = $hdp::params::smokeuser_keytab
- if ($hdp::params::security_enabled == true) {
-     $kinit_if_needed = "${hdp::params::kinit_path_local} -kt ${smokeuser_keytab} ${smoke_test_user};"
-   } else {
-     $kinit_if_needed = "echo 0;"
-   }
-
-  anchor{ "hdp::hdp-templeton::copy-hdfs-directories::begin" : }
-  anchor{ "hdp::hdp-templeton::copy-hdfs-directories::end" : }
-
-  $kinit_cmd = "su - ${webhcat_user} -c '${kinit_if_needed}'"
-  exec { $kinit_cmd:
-    command => $kinit_cmd,
-    path => ['/bin']
-  }
-
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-    hdp-hadoop::hdfs::copyfromlocal { '/usr/lib/hadoop-mapreduce/hadoop-streaming*.jar':
-      service_state => $service_state,
-      owner => $webhcat_user,
-      mode  => '755',
-      dest_dir => "$streaming_jar",
-      kinit_if_needed => $kinit_if_needed
-    }
-  }
-  else {
-    hdp-hadoop::hdfs::copyfromlocal { '/usr/lib/hadoop/contrib/streaming/hadoop-streaming*.jar':
-      service_state => $service_state,
-      owner => $webhcat_user,
-      mode  => '755',
-      dest_dir => "$streaming_jar",
-      kinit_if_needed => $kinit_if_needed
-    }
-  }
-  hdp-hadoop::hdfs::copyfromlocal { '/usr/share/HDP-webhcat/pig.tar.gz' :
-    service_state => $service_state,
-    owner => $webhcat_user,
-    mode  => '755',
-    dest_dir => "$pig_archiv",
-  }
-  hdp-hadoop::hdfs::copyfromlocal { '/usr/share/HDP-webhcat/hive.tar.gz' :
-    service_state => $service_state,
-    owner => $webhcat_user,
-    mode  => '755',
-    dest_dir => "$templeton_hive_archive",
-  }
-  Anchor["hdp::hdp-templeton::copy-hdfs-directories::begin"] ->
-  Exec[$kinit_cmd] ->
-  Hdp-hadoop::Hdfs::Copyfromlocal<||>  ->
-  Anchor["hdp::hdp-templeton::copy-hdfs-directories::end"]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/service.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/service.pp b/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/service.pp
deleted file mode 100644
index ceb3db0..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/service.pp
+++ /dev/null
@@ -1,67 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::service(
-  $ensure,
-  $initial_wait = undef
-)
-{
-  include hdp-templeton::params
-  
-  $user = "$hdp-templeton::params::webhcat_user"
-  $hadoop_home = $hdp-templeton::params::hadoop_prefix
-  $cmd = "env HADOOP_HOME=${hadoop_home} /usr/lib/hcatalog/sbin/webhcat_server.sh"
-  $pid_file = "${hdp-templeton::params::templeton_pid_dir}/webhcat.pid" 
-
-  if ($ensure == 'running') {
-    $daemon_cmd = "su - ${user} -c  '${cmd} start'"
-    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  } elsif ($ensure == 'stopped') {
-    $daemon_cmd = "su - ${user} -c  '${cmd} stop' && rm -f ${pid_file}"
-    $no_op_test = undef
-  } else {
-    $daemon_cmd = undef
-  }
-
-  hdp-templeton::service::directory { $hdp-templeton::params::templeton_pid_dir : }
-  hdp-templeton::service::directory { $hdp-templeton::params::templeton_log_dir : }
-
-  anchor{'hdp-templeton::service::begin':} -> Hdp-templeton::Service::Directory<||> -> anchor{'hdp-templeton::service::end':}
-  
-  if ($daemon_cmd != undef) {
-    hdp::exec { $daemon_cmd:
-      command => $daemon_cmd,
-      unless  => $no_op_test,
-      initial_wait => $initial_wait
-    }
-    Hdp-templeton::Service::Directory<||> -> Hdp::Exec[$daemon_cmd] -> Anchor['hdp-templeton::service::end']
-  }
-}
-
-define hdp-templeton::service::directory()
-{
-  hdp::directory_recursive_create { $name: 
-    owner => $hdp-templeton::params::webhcat_user,
-    mode => '0755',
-    service_state => $hdp-templeton::service::ensure,
-    force => true
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/templeton/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/templeton/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/templeton/service_check.pp
deleted file mode 100644
index 8172ed6..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/templeton/service_check.pp
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::templeton::service_check()
-{
-  include hdp-templeton::params
-  $smoke_test_user = $hdp::params::smokeuser
-  $security_enabled=$hdp::params::security_enabled
-  if ($security_enabled == true) {
-    $security = "true"
-  } else {
-    $security = "false"
-  }
-  $kinit_path = $hdp::params::kinit_path_local
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-
-  $templeton_host = $hdp::params::webhcat_server_host
-
-  $smoke_shell_files = ['templetonSmoke.sh']
-
-  anchor { 'hdp-templeton::templeton::service_check::begin':}
-
-  hdp-templeton::smoke_shell_file { $smoke_shell_files: }
-
-  anchor{ 'hdp-templeton::templeton::service_check::end':}
-}
-
-define hdp-templeton::smoke_shell_file()
-{
-  $smoke_test_user = $hdp::params::smokeuser
-    
-  $security = $hdp-templeton::templeton::service_check::security
-
-  $kinit_path = $hdp::params::kinit_path_local
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-
-  $templeton_host = $hdp::params::webhcat_server_host
-
-  file { '/tmp/templetonSmoke.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-templeton/templetonSmoke.sh",
-    mode => '0755'
-  }
-  
-  exec { '/tmp/templetonSmoke.sh':
-    command   => "sh /tmp/templetonSmoke.sh ${templeton_host} ${smoke_test_user} ${smoke_user_keytab} ${security} ${kinit_path}",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/templetonSmoke.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb
deleted file mode 100644
index e775a26..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The file containing the running pid
-PID_FILE=<%=scope.function_hdp_template_var("::hcat_pid_dir")%>/webhcat.pid
-
-TEMPLETON_LOG_DIR=<%=scope.function_hdp_template_var("::hcat_log_dir")%>/
-
-
-WEBHCAT_LOG_DIR=<%=scope.function_hdp_template_var("::hcat_log_dir")%>/
-
-# The console error log
-ERROR_LOG=<%=scope.function_hdp_template_var("::hcat_log_dir")%>/webhcat-console-error.log
-
-# The console log
-CONSOLE_LOG=<%=scope.function_hdp_template_var("::hcat_log_dir")%>/webhcat-console.log
-
-#TEMPLETON_JAR=<%=scope.function_hdp_template_var("templeton_jar_name")%>
-
-#HADOOP_PREFIX=<%=scope.function_hdp_template_var("hadoop_prefix")%>/
-
-#HCAT_PREFIX=<%=scope.function_hdp_template_var("hive_prefix")%>/
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-export HADOOP_HOME=/usr/lib/hadoop

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-tez/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-tez/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-tez/manifests/init.pp
deleted file mode 100644
index 25bbc1a..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-tez/manifests/init.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-tez::initialize()
-{
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-tez/manifests/tez_client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-tez/manifests/tez_client.pp b/ambari-agent/src/main/puppet/modules/hdp-tez/manifests/tez_client.pp
deleted file mode 100644
index bc4cfd7..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-tez/manifests/tez_client.pp
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-tez::tez_client(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-)
-{
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in 'installed_and_configured') {
-
-    include hdp-tez::initialize
-
-    $package_name = 'tez_client'
-
-    hdp::package{ $package_name :
-      ensure       => present,
-      package_type => $package_name
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/files/validateYarnComponentStatus.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/files/validateYarnComponentStatus.py b/ambari-agent/src/main/puppet/modules/hdp-yarn/files/validateYarnComponentStatus.py
deleted file mode 100644
index 6bb8041..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/files/validateYarnComponentStatus.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env ambari-python-wrap
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import subprocess
-import json
-
-RESOURCEMANAGER = 'rm'
-NODEMANAGER = 'nm'
-HISTORYSERVER = 'hs'
-
-STARTED_STATE = 'STARTED'
-RUNNING_STATE = 'RUNNING'
-
-#Return response for the given path and address
-def getResponse(path, address, ssl_enabled):
-
-  command = "curl"
-  httpGssnegotiate = "--negotiate"
-  userpswd = "-u:"
-  insecure = "-k"# This is smoke test, no need to check CA of server
-  if ssl_enabled:
-    url = 'https://' + address + path
-  else:
-    url = 'http://' + address + path
-
-  command_with_flags = [command,httpGssnegotiate,userpswd,insecure,url]
-  try:
-    proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    (stdout, stderr) = proc.communicate()
-    response = json.loads(stdout)
-    if response == None:
-      print 'There is no response for url: ' + str(url)
-      exit(1)
-    return response
-  except Exception as e:
-    print 'Error getting response for url:' + str(url), e
-    exit(1)
-
-#Verify that REST api is available for given component
-def validateAvailability(component, path, address, ssl_enabled):
-
-  try:
-    response = getResponse(path, address, ssl_enabled)
-    is_valid = validateAvailabilityResponse(component, response)
-    if not is_valid:
-      exit(1)
-  except Exception as e:
-    print 'Error checking availability status of component', e
-    exit(1)
-
-#Validate component-specific response
-def validateAvailabilityResponse(component, response):
-  try:
-    if component == RESOURCEMANAGER:
-      rm_state = response['clusterInfo']['state']
-      if rm_state == STARTED_STATE:
-        return True
-      else:
-        print 'Resourcemanager is not started'
-        return False
-
-    elif component == NODEMANAGER:
-      node_healthy = bool(response['nodeInfo']['nodeHealthy'])
-      if node_healthy:
-        return True
-      else:
-        return False
-    elif component == HISTORYSERVER:
-      hs_start_time = response['historyInfo']['startedOn']
-      if hs_start_time > 0:
-        return True
-      else:
-        return False
-    else:
-      return False
-  except Exception as e:
-    print 'Error validating availability response for ' + str(component), e
-    return False
-
-#Verify that component has required resources to work
-def validateAbility(component, path, address, ssl_enabled):
-
-  try:
-    response = getResponse(path, address, ssl_enabled)
-    is_valid = validateAbilityResponse(component, response)
-    if not is_valid:
-      exit(1)
-  except Exception as e:
-    print 'Error checking ability of component', e
-    exit(1)
-
-#Validate component-specific response that it has required resources to work
-def validateAbilityResponse(component, response):
-  try:
-    if component == RESOURCEMANAGER:
-      nodes = []
-      if response.has_key('nodes') and not response['nodes'] == None and response['nodes'].has_key('node'):
-        nodes = response['nodes']['node']
-      connected_nodes_count = len(nodes)
-      if connected_nodes_count == 0:
-        print 'There are no nodemanagers connected to the resourcemanager'
-        return False
-      active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
-      active_nodes_count = len(active_nodes)
-
-      if active_nodes_count == 0:
-        print 'There are no active nodemanagers connected to the resourcemanager'
-        return False
-      else:
-        return True
-    else:
-      return False
-  except Exception as e:
-    print 'Error validating ability response', e
-    return False
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
-  parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
-
-  (options, args) = parser.parse_args()
-
-  component = args[0]
-
-  address = options.address
-  ssl_enabled = (options.ssl_enabled == 'true')
-  if component == RESOURCEMANAGER:
-    path = '/ws/v1/cluster/info'
-  elif component == NODEMANAGER:
-    path = '/ws/v1/node/info'
-  elif component == HISTORYSERVER:
-    path = '/ws/v1/history/info'
-  else:
-    parser.error("Invalid component")
-
-  validateAvailability(component, path, address, ssl_enabled)
-
-  if component == RESOURCEMANAGER:
-    path = '/ws/v1/cluster/nodes'
-    validateAbility(component, path, address, ssl_enabled)
-
-if __name__ == "__main__":
-  main()
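
The RESOURCEMANAGER branch of the deleted validator boils down to one GET against the RM REST API plus a node-count check. A self-contained sketch of the availability half, again in Python 2; the /ws/v1/cluster/info path and the STARTED comparison are taken from the script above, while the host:port value is illustrative (8088 is the conventional RM web port):

import json
import urllib2

def resourcemanager_started(address, ssl_enabled=False):
  # address is 'host:port'; mirrors validateAvailability(RESOURCEMANAGER, ...)
  scheme = 'https' if ssl_enabled else 'http'
  url = '%s://%s/ws/v1/cluster/info' % (scheme, address)
  info = json.load(urllib2.urlopen(url, timeout=30))
  return info['clusterInfo']['state'] == 'STARTED'

print resourcemanager_started('rm-host.example.com:8088')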


http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh
deleted file mode 100644
index 6190a78..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh
+++ /dev/null
@@ -1,556 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMOND_BIN=/usr/sbin/gmond;
-GMOND_CORE_CONF_FILE=gmond.core.conf;
-GMOND_MASTER_CONF_FILE=gmond.master.conf;
-GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
-GMOND_PID_FILE=gmond.pid;
-
-# Functions.
-function getGmondCoreConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
-    fi
-}
-
-function getGmondMasterConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    fi
-}
-
-function getGmondSlaveConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    fi
-}
-
-function getGmondPidFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
-    else
-        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
-    fi
-}
-
-function removeGmondPidFileName()
-{
-    clusterName=${1};
-    gmondPidFileName=`getGmondPidFileName ${clusterName}`;
-    if [ -e "${gmondPidFileName}" ]; 
-     then
-      rm -rf ${gmondPidFileName};          
-    fi 
-}
-
-
-function getGmondLoggedPid()
-{
-    gmondPidFile=`getGmondPidFileName ${1}`;
-
-    if [ -e "${gmondPidFile}" ]
-    then
-        echo `cat ${gmondPidFile}`;
-    fi
-}
-
-function getGmondRunningPid()
-{
-    gmondLoggedPid=`getGmondLoggedPid ${1}`;
-
-    if [ -n "${gmondLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmondCoreConf()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_CORE_CONF
-#################### Generated by ${0} on ${now} ####################
-#
-/* This configuration is as close to 2.5.x default behavior as possible
-   The values closely match ./gmond/metric.h definitions in 2.5.x */
-globals {
-  daemonize = yes
-  setuid = yes
-  user = ${GMOND_USER}
-  debug_level = 0
-  max_udp_msg_len = 1472
-  mute = no
-  deaf = no 
-  allow_extra_data = yes
-  host_dmax = 0 /*secs */
-  host_tmax = 20 /*secs */
-  cleanup_threshold = 300 /*secs */
-  gexec = no
-  send_metadata_interval = 30 /*secs */
-}
-
-/*
- * The cluster attributes specified will be used as part of the <CLUSTER>
- * tag that will wrap all hosts collected by this instance.
- */
-cluster {
-  name = "${gmondClusterName}"
-  owner = "unspecified"
-  latlong = "unspecified"
-  url = "unspecified"
-}
-
-/* The host section describes attributes of the host, like the location */
-host {
-  location = "unspecified"
-}
-
-/* You can specify as many tcp_accept_channels as you like to share
- * an XML description of the state of the cluster.
- *
- * At the very least, every gmond must expose its XML state to 
- * queriers from localhost.
- */
-tcp_accept_channel {
-  bind = localhost
-  port = ${gmondPort}
-}
-
-/* Each metrics module that is referenced by gmond must be specified and
-   loaded. If the module has been statically linked with gmond, it does
-   not require a load path. However all dynamically loadable modules must
-   include a load path. */
-modules {
-  module {
-    name = "core_metrics"
-  }
-  module {
-    name = "cpu_module"
-    path = "modcpu.so"
-  }
-  module {
-    name = "disk_module"
-    path = "moddisk.so"
-  }
-  module {
-    name = "load_module"
-    path = "modload.so"
-  }
-  module {
-    name = "mem_module"
-    path = "modmem.so"
-  }
-  module {
-    name = "net_module"
-    path = "modnet.so"
-  }
-  module {
-    name = "proc_module"
-    path = "modproc.so"
-  }
-  module {
-    name = "sys_module"
-    path = "modsys.so"
-  }
-}
-
-/* The old internal 2.5.x metric array has been replaced by the following
-   collection_group directives.  What follows is the default behavior for
-   collecting and sending metrics that is as close to 2.5.x behavior as
-   possible. */
-
-/* This collection group will cause a heartbeat (or beacon) to be sent every
-   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
-   the age of the running gmond. */
-collection_group {
-  collect_once = yes
-  time_threshold = 20
-  metric {
-    name = "heartbeat"
-  }
-}
-
-/* This collection group will send general info about this host's total
-   memory every 180 secs.
-   This information doesn't change between reboots and is only collected
-   once. It is needed for the heatmap display. */
- collection_group {
-   collect_once = yes
-   time_threshold = 180
-   metric {
-    name = "mem_total"
-    title = "Memory Total"
-   }
- }
-
-/* This collection group will send general info about this host every
-   1200 secs.
-   This information doesn't change between reboots and is only collected
-   once. */
-collection_group {
-  collect_once = yes
-  time_threshold = 1200
-  metric {
-    name = "cpu_num"
-    title = "CPU Count"
-  }
-  metric {
-    name = "cpu_speed"
-    title = "CPU Speed"
-  }
-  /* Should this be here? Swap can be added/removed between reboots. */
-  metric {
-    name = "swap_total"
-    title = "Swap Space Total"
-  }
-  metric {
-    name = "boottime"
-    title = "Last Boot Time"
-  }
-  metric {
-    name = "machine_type"
-    title = "Machine Type"
-  }
-  metric {
-    name = "os_name"
-    title = "Operating System"
-  }
-  metric {
-    name = "os_release"
-    title = "Operating System Release"
-  }
-  metric {
-    name = "location"
-    title = "Location"
-  }
-}
-
-/* This collection group will send the status of gexecd for this host
-   every 300 secs.*/
-/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
-collection_group {
-  collect_once = yes
-  time_threshold = 300
-  metric {
-    name = "gexec"
-    title = "Gexec Status"
-  }
-}
-
-/* This collection group will collect the CPU status info every 20 secs.
-   The time threshold is set to 90 seconds.  In honesty, this
-   time_threshold could be set significantly higher to reduce
-   unnecessary network chatter. */
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* CPU status */
-  metric {
-    name = "cpu_user"
-    value_threshold = "1.0"
-    title = "CPU User"
-  }
-  metric {
-    name = "cpu_system"
-    value_threshold = "1.0"
-    title = "CPU System"
-  }
-  metric {
-    name = "cpu_idle"
-    value_threshold = "5.0"
-    title = "CPU Idle"
-  }
-  metric {
-    name = "cpu_nice"
-    value_threshold = "1.0"
-    title = "CPU Nice"
-  }
-  metric {
-    name = "cpu_aidle"
-    value_threshold = "5.0"
-    title = "CPU aidle"
-  }
-  metric {
-    name = "cpu_wio"
-    value_threshold = "1.0"
-    title = "CPU wio"
-  }
-  /* The next two metrics are optional if you want more detail...
-     ... since they are accounted for in cpu_system.
-  metric {
-    name = "cpu_intr"
-    value_threshold = "1.0"
-    title = "CPU intr"
-  }
-  metric {
-    name = "cpu_sintr"
-    value_threshold = "1.0"
-    title = "CPU sintr"
-  }
-  */
-}
-
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* Load Averages */
-  metric {
-    name = "load_one"
-    value_threshold = "1.0"
-    title = "One Minute Load Average"
-  }
-  metric {
-    name = "load_five"
-    value_threshold = "1.0"
-    title = "Five Minute Load Average"
-  }
-  metric {
-    name = "load_fifteen"
-    value_threshold = "1.0"
-    title = "Fifteen Minute Load Average"
-  }
-}
-
-/* This group collects the number of running and total processes */
-collection_group {
-  collect_every = 80
-  time_threshold = 950
-  metric {
-    name = "proc_run"
-    value_threshold = "1.0"
-    title = "Total Running Processes"
-  }
-  metric {
-    name = "proc_total"
-    value_threshold = "1.0"
-    title = "Total Processes"
-  }
-}
-
-/* This collection group grabs the volatile memory metrics every 40 secs and
-   sends them at least every 180 secs.  This time_threshold can be increased
-   significantly to reduce unneeded network traffic. */
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "mem_free"
-    value_threshold = "1024.0"
-    title = "Free Memory"
-  }
-  metric {
-    name = "mem_shared"
-    value_threshold = "1024.0"
-    title = "Shared Memory"
-  }
-  metric {
-    name = "mem_buffers"
-    value_threshold = "1024.0"
-    title = "Memory Buffers"
-  }
-  metric {
-    name = "mem_cached"
-    value_threshold = "1024.0"
-    title = "Cached Memory"
-  }
-  metric {
-    name = "swap_free"
-    value_threshold = "1024.0"
-    title = "Free Swap Space"
-  }
-}
-
-collection_group {
-  collect_every = 40
-  time_threshold = 300
-  metric {
-    name = "bytes_out"
-    value_threshold = 4096
-    title = "Bytes Sent"
-  }
-  metric {
-    name = "bytes_in"
-    value_threshold = 4096
-    title = "Bytes Received"
-  }
-  metric {
-    name = "pkts_in"
-    value_threshold = 256
-    title = "Packets Received"
-  }
-  metric {
-    name = "pkts_out"
-    value_threshold = 256
-    title = "Packets Sent"
-  }
-}
-
-
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "disk_free"
-    value_threshold = 1.0
-    title = "Disk Space Available"
-  }
-  metric {
-    name = "part_max_used"
-    value_threshold = 1.0
-    title = "Maximum Disk Space Used"
-  }
-  metric {
-    name = "disk_total"
-    value_threshold = 1.0
-    title = "Total Disk Space"
-  }
-}
-
-udp_recv_channel {
-    port = 0
-}
-
-
-include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
-END_OF_GMOND_CORE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondMasterConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_MASTER_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Masters only receive; they never send. */
-udp_recv_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-
-/* The gmond cluster master must additionally provide an XML 
- * description of the cluster to the gmetad that will query it.
- */
-tcp_accept_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-END_OF_GMOND_MASTER_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondSlaveConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_SLAVE_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Slaves only send; they never receive. */
-udp_send_channel {
-  #bind_hostname = yes # Highly recommended, soon to be default.
-                       # This option tells gmond to use a source address
-                       # that resolves to the machine's hostname.  Without
-                       # this, the metrics may appear to come from any
-                       # interface and the DNS names associated with
-                       # those IPs will be used to create the RRDs.
-  host = ${gmondMasterIP}
-  port = ${gmondPort}
-  ttl = 1
-}
-END_OF_GMOND_SLAVE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
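
The getGmondLoggedPid/getGmondRunningPid pair above is a two-step liveness check: read the pid recorded at startup, then confirm a process with that pid still exists. The same idea as a small Python 2 sketch (the example pid-file path is illustrative, following the ${GANGLIA_RUNTIME_DIR}/${clusterName}/gmond.pid layout of getGmondPidFileName; nothing here is part of the patch):

import os

def running_pid(pid_file):
  # Step 1: read the logged pid, if the pid file exists at all.
  if not os.path.exists(pid_file):
    return None
  pid = int(open(pid_file).read().strip())
  # Step 2: signal 0 probes for existence without delivering a signal.
  # (EPERM would also mean the process exists; this sketch ignores that case.)
  try:
    os.kill(pid, 0)
  except OSError:
    return None
  return pid

print running_pid('/var/run/ganglia/HDPSlaves/gmond.pid')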

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py
deleted file mode 100755
index d29a944..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import cgi
-import os
-import rrdtool
-import sys
-import time
-import re
-import urlparse
-
-# place this script in /var/www/cgi-bin of the Ganglia collector
-# requires 'yum install rrdtool-python' on the Ganglia collector
-
-
-def printMetric(clusterName, hostName, metricName, file, cf, start, end, resolution, pointInTime):
-  if clusterName.endswith("rrds"):
-    clusterName = ""
-
-  args = [file, cf]
-
-  if start is not None:
-    args.extend(["-s", start])
-
-  if end is not None:
-    args.extend(["-e", end])
-
-  if resolution is not None:
-    args.extend(["-r", resolution])
-
-  rrdMetric = rrdtool.fetch(args)
-  # ds_name
-  sys.stdout.write(rrdMetric[1][0])
-  sys.stdout.write("\n")
-
-  sys.stdout.write(clusterName)
-  sys.stdout.write("\n")
-  sys.stdout.write(hostName)
-  sys.stdout.write("\n")
-  sys.stdout.write(metricName)
-  sys.stdout.write("\n")
-
-  # write time
-  sys.stdout.write(str(rrdMetric[0][0]))
-  sys.stdout.write("\n")
-  # write step
-  sys.stdout.write(str(rrdMetric[0][2]))
-  sys.stdout.write("\n")
-
-  if not pointInTime:
-    valueCount = 0
-    lastValue = None
-
-    for tuple in rrdMetric[2]:
-
-      thisValue = tuple[0]
-
-      if valueCount > 0 and thisValue == lastValue:
-        valueCount += 1
-      else:
-        if valueCount > 1:
-          sys.stdout.write("[~r]")
-          sys.stdout.write(str(valueCount))
-          sys.stdout.write("\n")
-
-        if thisValue is None:
-          sys.stdout.write("[~n]\n")
-        else:
-          sys.stdout.write(str(thisValue))
-          sys.stdout.write("\n")
-
-        valueCount = 1
-        lastValue = thisValue
-  else:
-    value = None
-    idx   = -1
-    tuple = rrdMetric[2]
-    tupleLastIdx = len(tuple) * -1
-
-    while value is None and idx >= tupleLastIdx:
-      value = tuple[idx][0]
-      idx-=1
-
-    if value is not None:
-      sys.stdout.write(str(value))
-      sys.stdout.write("\n")
-
-  sys.stdout.write("[~EOM]\n")
-  return
-
-def stripList(l):
-  return([x.strip() for x in l])
-
-sys.stdout.write("Content-type: text/plain\n\n")
-
-# write start time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-requestMethod = os.environ['REQUEST_METHOD']
-
-if requestMethod == 'POST':
-  postData = sys.stdin.readline()
-  queryString = cgi.parse_qs(postData)
-  queryString = dict((k, v[0]) for k, v in queryString.items())
-elif requestMethod == 'GET':
-  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']));
-
-if "m" in queryString:
-  metricParts = queryString["m"].split(",")
-else:
-  metricParts = [""]
-metricParts = stripList(metricParts)
-
-hostParts = []
-if "h" in queryString:
-  hostParts = queryString["h"].split(",")
-hostParts = stripList(hostParts)
-
-if "c" in queryString:
-  clusterParts = queryString["c"].split(",")
-else:
-  clusterParts = [""]
-clusterParts = stripList(clusterParts)
-
-if "p" in queryString:
-  rrdPath = queryString["p"]
-else:
-  rrdPath = "/var/lib/ganglia/rrds/"
-
-start = None
-if "s" in queryString:
-  start = queryString["s"]
-
-end = None
-if "e" in queryString:
-  end = queryString["e"]
-
-resolution = None
-if "r" in queryString:
-  resolution = queryString["r"]
-
-if "cf" in queryString:
-  cf = queryString["cf"]
-else:
-  cf = "AVERAGE"
-
-if "pt" in queryString:
-  pointInTime = True
-else:
-  pointInTime = False
-
-def _walk(*args, **kwargs):
-
-  for root,dirs,files in os.walk(*args, **kwargs):
-    for dir in dirs:
-      qualified_dir = os.path.join(root,dir)
-      if os.path.islink(qualified_dir):
-        for x in os.walk(qualified_dir, **kwargs):
-          yield x
-    yield (root, dirs, files)
-
-
-for cluster in clusterParts:
-  for path, dirs, files in _walk(rrdPath + cluster):
-    pathParts = path.split("/")
-    #Process only paths which contain files. If no host parameter is passed, process all host folders and the summary info.
-    #If a host parameter is passed, process only that host's folder.
-    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
-      for metric in metricParts:
-        file = metric + ".rrd"
-        fileFullPath = os.path.join(path, file)
-        if os.path.exists(fileFullPath):
-          #Exact name of metric
-          printMetric(pathParts[-2], pathParts[-1], file[:-4], os.path.join(path, file), cf, start, end, resolution, pointInTime)
-        else:
-          #Regex as metric name
-          metricRegex = metric + '\.rrd$'
-          p = re.compile(metricRegex)
-          matchedFiles = filter(p.match, files)
-          for matchedFile in matchedFiles:
-            printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4], os.path.join(path, matchedFile), cf, start, end, resolution, pointInTime)
-
-
-sys.stdout.write("[~EOF]\n")
-# write end time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-sys.stdout.flush()
\ No newline at end of file
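
rrd.py above is a plain CGI script; every query parameter it understands (m, h, c, p, s, e, r, cf, pt) is visible in the code. An illustrative Python 2 client — the collector hostname and the /cgi-bin/rrd.py URL are assumptions (the script's own comment says to place it in /var/www/cgi-bin of the Ganglia collector), and the cluster/host/metric values are placeholders:

import urllib
import urllib2

params = urllib.urlencode({
  'c': 'HDPSlaves',      # cluster directory under the rrd path
  'h': 'host1.example',  # optional: restrict output to one host folder
  'm': 'mem_free',       # metric name, exact or a regex per the script
  'cf': 'AVERAGE',       # consolidation function (the script's default)
})
url = 'http://ganglia-collector.example/cgi-bin/rrd.py?' + params
# The response is the plain-text stream built above, terminated by [~EOF].
print urllib2.urlopen(url, timeout=30).read()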

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrdcachedLib.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrdcachedLib.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrdcachedLib.sh
deleted file mode 100644
index 8b7c257..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrdcachedLib.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-RRDCACHED_BIN=/usr/bin/rrdcached;
-RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
-RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
-RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
-
-function getRrdcachedLoggedPid()
-{
-    if [ -e "${RRDCACHED_PID_FILE}" ]
-    then
-        echo `cat ${RRDCACHED_PID_FILE}`;
-    fi
-}
-
-function getRrdcachedRunningPid()
-{
-    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
-
-    if [ -n "${rrdcachedLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/setupGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/setupGanglia.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/setupGanglia.sh
deleted file mode 100644
index 5145b9c..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/setupGanglia.sh
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh
-
-function usage()
-{
-  cat << END_USAGE
-Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
-
-Options:
-  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
-
-  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
-                          Cluster. Without this, we generate slave gmond configuration.
-
-  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
-                          gmond configuration that is generated without this).
-  -o <owner>              Owner
-  -g <group>              Group
-END_USAGE
-}
-
-function instantiateGmetadConf()
-{
-  # gmetad utility library.
-  source ./gmetadLib.sh;
-
-  generateGmetadConf > ${GMETAD_CONF_FILE};
-}
-
-function instantiateGmondConf()
-{
-  # gmond utility library.
-  source ./gmondLib.sh;
- 
-  gmondClusterName=${1};
-
-  if [ "x" != "x${gmondClusterName}" ]
-  then
-
-    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
-    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
-    
-    # Always blindly generate the core gmond config - that goes on every box running gmond. 
-    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
-
-    isMasterGmond=${2};
-
-    # Decide whether we want to add on the master or slave gmond config.
-    if [ "0" -eq "${isMasterGmond}" ]
-    then
-      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
-    else
-      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
-    fi
-
-    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
-
-  else
-    echo "No gmondClusterName passed in, nothing to instantiate";
-  fi
-}
-
-# main()
-
-gmondClusterName=;
-isMasterGmond=0;
-configureGmetad=0;
-owner='root';
-group='root';
-
-while getopts ":c:mto:g:" OPTION
-do
-  case ${OPTION} in
-    c) 
-      gmondClusterName=${OPTARG};
-      ;;
-    m)
-      isMasterGmond=1;
-      ;;
-    t)
-      configureGmetad=1;
-      ;;
-    o)
-      owner=${OPTARG};
-      ;;
-    g)
-      group=${OPTARG};
-      ;;
-    ?)
-      usage;
-      exit 1;
-  esac
-done
-
-# Initialization.
-createDirectory ${GANGLIA_CONF_DIR};
-createDirectory ${GANGLIA_RUNTIME_DIR};
-# So rrdcached can drop its PID files in here.
-chmod a+w ${GANGLIA_RUNTIME_DIR};
-chown ${owner}:${group} ${GANGLIA_CONF_DIR};
-
-if [ -n "${gmondClusterName}" ]
-then
-
-  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
-  if [ "1" -eq "${configureGmetad}" ]
-  then
-    instantiateGmetadConf;
-  else
-    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
-  fi
-
-elif [ "1" -eq "${configureGmetad}" ]
-then
-  instantiateGmetadConf;
-else
-  usage;
-  exit 2;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmetad.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmetad.sh
deleted file mode 100644
index ab5102d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmetad.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
-source ./rrdcachedLib.sh;
-
-# Before starting gmetad, start rrdcached.
-./startRrdcached.sh;
-
-if [ $? -eq 0 ] 
-then
-    gmetadRunningPid=`getGmetadRunningPid`;
-
-    # Only attempt to start gmetad if there's not already one running.
-    if [ -z "${gmetadRunningPid}" ]
-    then
-        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
-
-        for i in `seq 0 5`; do
-          gmetadRunningPid=`getGmetadRunningPid`;
-          if [ -n "${gmetadRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-
-        if [ -n "${gmetadRunningPid}" ]
-        then
-            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
-        else
-            echo "Failed to start ${GMETAD_BIN}";
-            exit 1;
-        fi
-    else
-        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
-    fi
-else
-    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
-    exit 2;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmond.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmond.sh
deleted file mode 100644
index 55420b7..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmond.sh
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function startGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only attempt to start gmond if there's not already one running.
-    if [ -z "${gmondRunningPid}" ]
-    then
-      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-      removeGmondPidFileName ${gmondClusterName};
-      if [ -e "${gmondCoreConfFileName}" ]
-      then 
-        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
-
-        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
-
-        for i in `seq 0 5`; do
-          gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-          if [ -n "${gmondRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-  
-        if [ -n "${gmondRunningPid}" ]
-        then
-            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
-        else
-            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
-            exit 1;
-        fi
-      fi 
-    else
-      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so start 
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        startGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just start the one ${gmondClusterName} that was asked for.
-    startGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh
deleted file mode 100644
index e79472b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only attempt to start rrdcached if there's not already one running.
-if [ -z "${rrdcachedRunningPid}" ]
-then
-    #changed because of a problem puppet had with the 'nobody' user
-    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-    #         -b /var/lib/ganglia/rrds -B
-    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-             -b ${RRDCACHED_BASE_DIR} -B"
-
-    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for
-    # this, but it sometimes doesn't take effect due to a lack of permissions,
-    # so perform the operation explicitly to be extra sure.
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
-
-    # Check to make sure rrdcached actually started up.
-    for i in `seq 0 5`; do
-      rrdcachedRunningPid=`getRrdcachedRunningPid`;
-      if [ -n "${rrdcachedRunningPid}" ]
-        then
-          break;
-      fi
-      sleep 1;
-    done
-
-    if [ -n "${rrdcachedRunningPid}" ]
-    then
-        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
-    else
-        echo "Failed to start ${RRDCACHED_BIN}";
-        exit 1;
-    fi
-else
-    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmetad.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmetad.sh
deleted file mode 100644
index 2764e0e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmetad.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${gmetadRunningPid}" ]
-then
-    kill -KILL ${gmetadRunningPid};
-    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
-fi
-
-# Poll again.
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Once we've killed gmetad, there should no longer be a running PID.
-if [ -z "${gmetadRunningPid}" ]
-then
-    # It's safe to stop rrdcached now.
-    ./stopRrdcached.sh;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh
deleted file mode 100644
index 94cfc28..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function stopGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only go ahead with the termination if we could find a running PID.
-    if [ -n "${gmondRunningPid}" ]
-    then
-      kill -KILL ${gmondRunningPid};
-      removeGmondPidFileName ${gmondClusterName};
-      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so stop
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        stopGmondForCluster ${gmondClusterName};
-    done
-else
-    stopGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopRrdcached.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopRrdcached.sh
deleted file mode 100644
index 0a0d8d8..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopRrdcached.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${rrdcachedRunningPid}" ]
-then
-    kill -TERM ${rrdcachedRunningPid};
-    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
-    # until we're sure it's well and truly dead. 
-    #
-    # Without this, an immediately following startRrdcached.sh won't do
-    # anything, because it still sees this soon-to-die instance alive,
-    # and the net result is that after a few seconds, there's no
-    # ${RRDCACHED_BIN} running on the box anymore.
-    sleep 5;
-    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
-fi 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/teardownGanglia.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/teardownGanglia.sh
deleted file mode 100644
index b27f7a2..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/teardownGanglia.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh;
-
-# Undo what we did while setting up Ganglia on this box.
-rm -rf ${GANGLIA_CONF_DIR};
-rm -rf ${GANGLIA_RUNTIME_DIR};

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config.pp b/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config.pp
deleted file mode 100644
index bf69f51..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config.pp
+++ /dev/null
@@ -1,79 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::config(
-  $ganglia_server_host = undef,
-  $service_state = $hdp::params::cluster_service_state
-)
-{
- if ($service_state in ['running','installed_and_configured','stopped']) {
-    #TODO: divide into what is needed on server vs what is needed on monitored nodes
-    $shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
-    $shell_files = ['checkGmond.sh','checkRrdcached.sh','gmetadLib.sh','gmondLib.sh','rrdcachedLib.sh' ,'setupGanglia.sh','startGmetad.sh','startGmond.sh','startRrdcached.sh','stopGmetad.sh','stopGmond.sh','stopRrdcached.sh','teardownGanglia.sh']
-
-    hdp::directory_recursive_create { $shell_cmds_dir :
-      owner => root,
-      group => root
-    } 
-
-     hdp-ganglia::config::init_file { ['gmetad','gmond']: }
-
-     hdp-ganglia::config::shell_file { $shell_files: }                       
-
-     hdp-ganglia::config::file { ['gangliaClusters.conf','gangliaEnv.sh','gangliaLib.sh']: 
-       ganglia_server_host => $ganglia_server_host
-     }
- 
-     anchor{'hdp-ganglia::config::begin':} -> Hdp::Directory_recursive_create[$shell_cmds_dir] -> Hdp-ganglia::Config::Shell_file<||> -> anchor{'hdp-ganglia::config::end':}
-     Anchor['hdp-ganglia::config::begin'] -> Hdp-ganglia::Config::Init_file<||> -> Anchor['hdp-ganglia::config::end']
-     Anchor['hdp-ganglia::config::begin'] -> Hdp-ganglia::Config::File<||> -> Anchor['hdp-ganglia::config::end']
-  }
-}
-
-define hdp-ganglia::config::shell_file()
-{
-  file { "${hdp-ganglia::params::ganglia_shell_cmds_dir}/${name}":
-    source => "puppet:///modules/hdp-ganglia/${name}", 
-    mode => '0755'
-  }
-}
-
-define hdp-ganglia::config::init_file()
-{
-  file { "/etc/init.d/hdp-${name}":
-    source => "puppet:///modules/hdp-ganglia/${name}.init", 
-    mode => '0755'
-  }
-}
-
-### config files
-define hdp-ganglia::config::file(
-  $ganglia_server_host = undef
-)
-{
-  hdp::configfile { "${hdp-ganglia::params::ganglia_shell_cmds_dir}/${name}":
-    component           => 'ganglia',
-    owner               => root,
-    group               => root
-  }
-  if ($ganglia_server_host != undef) {
-    Hdp::Configfile<||>{ganglia_server_host => $ganglia_server_host}
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_daemon.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_daemon.pp b/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_daemon.pp
deleted file mode 100755
index 1718cb8..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_daemon.pp
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#TODO: the scripts called here should be converted to native puppet
-define hdp-ganglia::config::generate_daemon(
-  $ganglia_service,
-  $role,
-  $owner = 'root',
-  $group = $hdp::params::user_group
-)
-{
-  $shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
-  $cmd = $ganglia_service ? {
-    'gmond'  => $role ? {
-      'server' => "${shell_cmds_dir}/setupGanglia.sh -c ${name} -m -o ${owner} -g ${group}",
-       default =>  "${shell_cmds_dir}/setupGanglia.sh -c ${name} -o ${owner} -g ${group}"
-    },
-    'gmetad' => "${shell_cmds_dir}/setupGanglia.sh -t -o ${owner} -g ${group}",
-     default => hdp_fail("Unexpected ganglia service: ${$ganglia_service}")	
-  }
-
-  #TODO: put in test condition
-  hdp::exec { $cmd:
-    command => $cmd
- }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmetad/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmetad/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmetad/service_check.pp
deleted file mode 100644
index 15cbe36..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmetad/service_check.pp
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::hdp-gmetad::service_check() 
-{
-  
-  anchor { 'hdp-ganglia::hdp-gmetad::service_check::begin':}
-
-  exec { 'hdp-gmetad':
-    command   => "/etc/init.d/hdp-gmetad status | grep -v failed",
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    before      => Anchor['hdp-ganglia::hdp-gmetad::service_check::end'],
-    logoutput => "true"
-  }
-
-  anchor{ 'hdp-ganglia::hdp-gmetad::service_check::end':}
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmond/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmond/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmond/service_check.pp
deleted file mode 100644
index 8c1ed52..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmond/service_check.pp
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::hdp-gmond::service_check() 
-{
-  
-  anchor { 'hdp-ganglia::hdp-gmond::service_check::begin':}
-
-  exec { 'hdp-gmond':
-    command   => "/etc/init.d/hdp-gmond status | grep -v failed",
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    before      => Anchor['hdp-ganglia::hdp-gmond::service_check::end'],
-    logoutput => "true"
-  }
-
-  anchor{ 'hdp-ganglia::hdp-gmond::service_check::end':}
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/init.pp
deleted file mode 100644
index 2c98355..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/init.pp
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia(
-  $service_state
-)
-{
-  if ! ($service_state in ['no_op', 'uninstalled']) {
-    include hdp-ganglia::params
-    $gmetad_user = $hdp-ganglia::params::gmetad_user
-    $gmond_user = $hdp-ganglia::params::gmond_user
-
-    hdp::group { 'gmetad_group' :
-      group_name => $gmetad_user,
-    }
-
-    hdp::group { 'gmond_group':
-      group_name => $gmond_user,
-    }
-
-    hdp::user { 'gmond_user': 
-      user_name =>  $gmond_user,
-      gid    => $gmond_user,
-      groups => ["$gmond_user"]
-    }
-  
-    hdp::user { 'gmetad_user':
-      user_name => $gmetad_user,
-      gid    => $gmetad_user,
-      groups => ["$gmetad_user"]
-    }
-
-    anchor{'hdp-ganglia::begin':} -> Hdp::Group<|title == 'gmond_group' or title == 'gmetad_group'|> -> Hdp::User['gmond_user'] -> Hdp::User['gmetad_user'] ->  anchor{'hdp-ganglia::end':}
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp b/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp
deleted file mode 100644
index 965eb30..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp
+++ /dev/null
@@ -1,165 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::monitor(
-  $service_state = $hdp::params::cluster_service_state,
-  $ganglia_server_host = undef,
-  $opts = {}
-) inherits hdp-ganglia::params
-{
-  if  ($service_state == 'no_op') {
-  } elsif ($service_state == 'uninstalled') {     
-
-   hdp::package { 'ganglia-monitor':         
-       ensure      => 'uninstalled', 
-      java_needed => false      
-   }
-
-  } else {
-    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
-      class { 'hdp-ganglia':
-       service_state => $service_state
-      }
-    }
-
-    hdp::package { 'ganglia-monitor': }
-
-    hdp::package { 'ganglia-gmond-modules-python': }
-
-    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
-      class { 'hdp-ganglia::config': ganglia_server_host => $ganglia_server_host}
-    }
-
-    if (($hdp::params::service_exists['hdp-hadoop::datanode'] == true) or ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::tasktracker'] == true) or ($hdp::params::service_exists['hdp-yarn::resourcemanager'] == true) or ($hdp::params::service_exists['hdp-yarn::nodemanager'] == true) or ($hdp::params::service_exists['hdp-yarn::historyserver'] == true) or ($hdp::params::service_exists['hdp-hadoop::client'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)) {
-     class { 'hdp-hadoop::enable-ganglia': }
-   }
-
-    if ($service_exists['hdp-hbase::master'] == true) {
-      class { 'hdp-hbase::master::enable-ganglia': }
-    }
-  
-    if ($service_exists['hdp-hbase::regionserver'] == true) {
-      class { 'hdp-hbase::regionserver::enable-ganglia': }
-    }
-
-    class { 'hdp-ganglia::monitor::config-gen': }
-  
-    class { 'hdp-ganglia::monitor::gmond': ensure => $service_state}
-
-    class { 'hdp-ganglia::monitor::ownership': }
-
-    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
-      Class['hdp-ganglia'] -> Hdp::Package['ganglia-monitor'] -> Hdp::Package['ganglia-gmond-modules-python'] -> Class['hdp-ganglia::config'] -> 
-        Class['hdp-ganglia::monitor::config-gen'] -> Class['hdp-ganglia::monitor::ownership'] ->
-        Class['hdp-ganglia::monitor::gmond']
-    } else {
-      Hdp::Package['ganglia-monitor'] -> Hdp::Package['ganglia-gmond-modules-python'] -> Class['hdp-ganglia::monitor::config-gen'] ->
-        Class['hdp-ganglia::monitor::ownership'] -> Class['hdp-ganglia::monitor::gmond']
-    }
-  }
-}
-
-
-class hdp-ganglia::monitor::config-gen()
-{
-
-  $service_exists = $hdp::params::service_exists
-
-  if ($hdp::params::is_namenode_master) {
-    hdp-ganglia::config::generate_daemon { 'HDPNameNode':}
-  }
-  if ($hdp::params::is_jtnode_master) {
-    hdp-ganglia::config::generate_daemon { 'HDPJobTracker':}
-  }
-  if ($hdp::params::is_rmnode_master) {
-    hdp-ganglia::config::generate_daemon { 'HDPResourceManager':}
-  }
-  if ($hdp::params::is_hsnode_master) {
-    hdp-ganglia::config::generate_daemon { 'HDPHistoryServer':}
-  }
-  if ($hdp::params::is_hbase_master) {
-    hdp-ganglia::config::generate_daemon { 'HDPHBaseMaster':}
-  }
-  
-  if (($hdp::params::is_slave == true) 
-    or (($hdp::params::is_namenode_master == false) 
-      and ($hdp::params::is_jtnode_master == false) 
-      and ($hdp::params::is_rmnode_master == false) 
-      and ($hdp::params::is_hsnode_master == false) 
-      and ($hdp::params::is_hbase_master ==  false))) {
-    hdp-ganglia::config::generate_daemon { 'HDPSlaves':}
-  }
-
-  Hdp-ganglia::Config::Generate_daemon<||>{
-    ganglia_service => 'gmond',
-    role => 'monitor'
-  }
-   # 
-  anchor{'hdp-ganglia::monitor::config-gen::begin':} -> Hdp-ganglia::Config::Generate_daemon<||> -> anchor{'hdp-ganglia::monitor::config-gen::end':}
-}
-
-class hdp-ganglia::monitor::gmond(
-  $ensure
-  )
-{
-  if ($ensure == 'running') {
-    class { 'hdp-ganglia::server::delete_default_gmond_process': }
-    $command = "service hdp-gmond start >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"
-   } elsif  ($ensure == 'stopped') {
-    $command = "service hdp-gmond stop >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"
-  }
-  if ($ensure == 'running' or $ensure == 'stopped') {
-    hdp::exec { "hdp-gmond service" :
-      command => $command,
-      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    }
-  }
-}
-
-class hdp-ganglia::monitor::ownership() {
-
-  file { "${hdp-ganglia::params::ganglia_dir}/conf.d":
-    owner  => 'root',
-    group  => $hdp::params::user_group
-  }
-
-  file { "${hdp-ganglia::params::ganglia_dir}/conf.d/modgstatus.conf":
-    owner => 'root',
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-ganglia::params::ganglia_dir}/conf.d/multicpu.conf":
-    owner => 'root',
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-ganglia::params::ganglia_dir}/gmond.conf":
-    owner => 'root',
-    group => $hdp::params::user_group
-  }
-}
-
-class hdp-ganglia::server::delete_default_gmond_process() {
-  hdp::exec { "delete_default_gmond_process" :
-    command => "chkconfig gmond off",
-    path => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    require => Class['hdp-ganglia::monitor::gmond']
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor_and_server.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor_and_server.pp b/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor_and_server.pp
deleted file mode 100644
index 418c4cb..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor_and_server.pp
+++ /dev/null
@@ -1,79 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::monitor_and_server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-ganglia::params
-{
-  $ganglia_shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
-  $ganglia_conf_dir = $hdp-ganglia::params::ganglia_conf_dir
-  $ganglia_runtime_dir = $hdp-ganglia::params::ganglia_runtime_dir
-
-  #note: includes the common package ganglia-monitor
-  class { 'hdp-ganglia':
-    service_state => $service_state
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['uninstalled']) {
-    class { 'hdp-ganglia::server::packages':
-      ensure => 'uninstalled'
-      }
-
-    hdp::directory { [$ganglia_conf_dir,$ganglia_runtime_dir]:
-      service_state => $service_state,
-      force => true
-    }
-    
-    class { 'hdp-ganglia::config':
-      service_state => $service_state
-    }
-
-    Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> 
-      Hdp::Directory[$ganglia_conf_dir] -> Hdp::Directory[$ganglia_runtime_dir] ->
-      Class['hdp-ganglia::config']
-  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
-    class { 'hdp-ganglia::server::packages': }
-
-    class { 'hdp-ganglia::config': 
-     ganglia_server_host => $hdp::params::host_address,
-     service_state       => $service_state
-     }
-
-    class {'hdp-ganglia::monitor::config-gen': }      
-    
-    
-    hdp-ganglia::config::generate_daemon { 'gmetad':
-      ganglia_service => 'gmetad'
-    }
-
-    class { 'hdp-ganglia::service::change_permission':
-      ensure => $service_state
-    }
-
-    #top level, so no anchors are needed
-    Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> Class['hdp-ganglia::config'] -> 
-      Class['hdp-ganglia::monitor::config-gen'] -> Hdp-ganglia::Config::Generate_daemon['gmetad'] ->
-      Class['hdp-ganglia::service::change_permission']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp
deleted file mode 100644
index 31c8a1f..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::params() inherits hdp::params
-{
-  $ganglia_conf_dir = "/etc/ganglia/hdp"
-  $ganglia_dir = "/etc/ganglia"
-  $ganglia_runtime_dir = "/var/run/ganglia/hdp"
-
-  $ganglia_shell_cmds_dir = hdp_default("ganglia_shell_cmd_dir","/usr/libexec/hdp/ganglia")
-  
-  $gmetad_user = $hdp::params::gmetad_user
-  $gmond_user = $hdp::params::gmond_user
-
-  $webserver_group = hdp_default("webserver_group","apache")
-  $rrdcached_default_base_dir = "/var/lib/ganglia/rrds"
-  $rrdcached_base_dir = hdp_default("rrdcached_base_dir", "/var/lib/ganglia/rrds")
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp b/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp
deleted file mode 100644
index 7dca30c..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp
+++ /dev/null
@@ -1,259 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits  hdp-ganglia::params
-{
-  $hdp::params::service_exists['hdp-ganglia::server'] = true
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state == 'uninstalled') {
-
-   class { 'hdp-ganglia::server::packages':
-      ensure => 'uninstalled',
-      service_state => $service_state
-   }
-
-   class { 'hdp-ganglia::server::files':
-      ensure => 'absent'
-   }
-
-  } else {
-  class { 'hdp-ganglia':
-    service_state => $service_state
-  }
-
-  class { 'hdp-ganglia::server::packages':
-    ensure => 'present',
-    service_state => $service_state
-  }
-
-  class { 'hdp-ganglia::config': 
-    ganglia_server_host => $hdp::params::host_address,
-    service_state       => $service_state 
-  }
-
-  if ($hdp::params::has_namenodes) {
-    hdp-ganglia::config::generate_daemon { 'HDPNameNode':
-      ganglia_service => 'gmond',
-      role => 'server'
-    }
-  }
-  
-  if ($hdp::params::has_jobtracker) {
-    hdp-ganglia::config::generate_daemon { 'HDPJobTracker':
-      ganglia_service => 'gmond',
-      role => 'server'
-    }
-  }
-  
-  if ($hdp::params::has_hbase_masters) {
-    hdp-ganglia::config::generate_daemon { 'HDPHBaseMaster':
-      ganglia_service => 'gmond',
-      role => 'server'
-    }
-  }
-
-  if ($hdp::params::has_resourcemanager) {
-    hdp-ganglia::config::generate_daemon { 'HDPResourceManager':
-      ganglia_service => 'gmond',
-      role => 'server'
-    }
-  }
-  
-  if ($hdp::params::has_histroryserver) {
-    hdp-ganglia::config::generate_daemon { 'HDPHistoryServer':
-      ganglia_service => 'gmond',
-      role => 'server'
-    }
-  }
-
-  hdp-ganglia::config::generate_daemon { 'HDPSlaves':
-    ganglia_service => 'gmond',
-    role => 'server'
-  }
-
-  hdp-ganglia::config::generate_daemon { 'gmetad':
-    ganglia_service => 'gmetad',
-    role => 'server'
-  }
-
-  class { 'hdp-ganglia::server::gmetad': ensure => $service_state}
-
-  class { 'hdp-ganglia::service::change_permission': ensure => $service_state }
-
-  if ($service_state == 'installed_and_configured') {
-    $webserver_state = 'restart'
-  } elsif ($service_state == 'running') {
-    $webserver_state = 'running'
-  } else {
-    # We are never stopping httpd
-    #$webserver_state = $service_state
-  }
-
-  class { 'hdp-monitor-webserver': service_state => $webserver_state}
-
-  class { 'hdp-ganglia::server::files':
-     ensure => 'present'
-  }
-
-  file { "${hdp-ganglia::params::ganglia_dir}/gmetad.conf":
-    owner => 'root',
-    group => $hdp::params::user_group
-  }
-
-  #top level does not need anchors
-  Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> Class['hdp-ganglia::config'] ->
-    Hdp-ganglia::Config::Generate_daemon<||> ->
-    File["${hdp-ganglia::params::ganglia_dir}/gmetad.conf"] -> Class['hdp-ganglia::service::change_permission'] ->
-    Class['hdp-ganglia::server::files'] -> Class['hdp-ganglia::server::gmetad'] -> Class['hdp-monitor-webserver']
- }
-}
-
-class hdp-ganglia::server::packages(
-  $ensure = present,
-  $service_state = 'installed_and_configured'
-)
-{
-  hdp::package { ['libganglia','ganglia-devel','ganglia-server','ganglia-web']: 
-    ensure      => $ensure,
-    java_needed => false,
-    require => Hdp::Package ['rrdtool-python']
-  }
-
-  # Remove conflicting packages only once to work around the "/bin/rpm -e absent-absent-absent.absent" bug (BUG-2881)
-  if ($service_state == 'installed_and_configured' and $hdp::params::hdp_os_type == 'centos5') {
-    # Remove conflicting 32bit package
-    hdp::package { ['rrdtool-devel']:
-      ensure      => 'absent',
-      java_needed => false,
-      before => Hdp::Package ['rrdtool']
-    }
-
-    # Remove conflicting 32bit package
-    hdp::package { ['rrdtool']:
-      ensure      => 'absent',
-      java_needed => false,
-      before => Hdp::Package ['rrdtool-python']
-    }
-  }
-
-  hdp::package { ['rrdtool-python']:
-    ensure      => $ensure,
-    java_needed => false
-  }
-
-}
-
-class hdp-ganglia::server::files(
-  $ensure = present 
-)
-{
-  $rrd_py_path = $hdp::params::rrd_py_path [$hdp::params::hdp_os_type]
-  hdp::directory_recursive_create{$rrd_py_path:
-    ensure => "directory", 
-    override_owner => false 
-  }
-
-  $rrd_py_file_path = "${rrd_py_path}/rrd.py"
-
-  file{$rrd_py_file_path :
-    ensure => $ensure,
-    source => "puppet:///modules/hdp-ganglia/rrd.py",
-    mode   => '0755'
-  }
-
-  anchor{ 'hdp-ganglia::server::files::begin' : } -> Hdp::Directory_recursive_create[$rrd_py_path] -> File[$rrd_py_file_path] -> anchor{ 'hdp-ganglia::server::files::end' : }
-
-  $rrd_files_dir = $hdp-ganglia::params::rrdcached_base_dir
-  $rrd_file_owner = $hdp-ganglia::params::gmetad_user
-  $rrdcached_default_file_dir = $hdp-ganglia::params::rrdcached_default_base_dir
-
-  ## If the directory differs from the default, make sure it exists
-  if ($rrdcached_default_file_dir != $rrd_files_dir) {
-    hdp::directory_recursive_create{ $rrd_files_dir :
-      ensure => "directory",
-      owner => $rrd_file_owner,
-      group => $rrd_file_owner,
-      mode => '0755'
-    }
-
-    file { $rrdcached_default_file_dir :
-      ensure => link,
-      target => $rrd_files_dir,
-      force => true
-    }
-
-    File[$rrd_py_file_path] -> Hdp::Directory_recursive_create[$rrd_files_dir] -> File[$rrdcached_default_file_dir] -> Anchor['hdp-ganglia::server::files::end']
-  }
-  elsif ($rrd_file_owner != $hdp::params::NOBODY_USER) {
-    #The owner of rrdcached_default_file_dir is 'nobody' by default;
-    #change it to gmetad_user so the gmetad service can start properly.
-    
-    hdp::directory { $rrdcached_default_file_dir:
-      owner => $rrd_file_owner,
-      group => $rrd_file_owner,
-      override_owner => true
-    }
-    
-    File[$rrd_py_file_path] -> Hdp::Directory[$rrdcached_default_file_dir] -> Anchor['hdp-ganglia::server::files::end']
-  }
-}
-
-
-class hdp-ganglia::service::change_permission(
-  $ensure
-)
-{
-  if ($ensure == 'running' or $ensure == 'installed_and_configured') {
-    hdp::directory_recursive_create { '/var/lib/ganglia/dwoo' :
-      mode => '0777',
-      owner => $hdp-ganglia::params::gmetad_user
-    }
-  }
-}
-
-class hdp-ganglia::server::gmetad(
-  $ensure
-)
-{
-  if ($ensure == 'running') {
-    class { 'hdp-ganglia::server::delete_default_gmetad_process': }
-    $command = "service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
-   } elsif  ($ensure == 'stopped') {
-    $command = "service hdp-gmetad stop >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
-  }
-  if ($ensure == 'running' or $ensure == 'stopped') {
-    hdp::exec { "hdp-gmetad service" :
-      command => "$command",
-      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    }
-  }
-}
-
-class hdp-ganglia::server::delete_default_gmetad_process() {
-  hdp::exec { "delete_default_gmetad_process" :
-    command => "chkconfig gmetad off",
-    path => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    require => Class['hdp-ganglia::server::gmetad']
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb b/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb
deleted file mode 100644
index 5e03bd5..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb
+++ /dev/null
@@ -1,43 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#########################################################
-### ClusterName           GmondMasterHost   GmondPort ###
-#########################################################
-
-    HDPSlaves       	<%=scope.function_hdp_host("ganglia_server_host")%>  8660
-
-<% if (scope.function_hdp_default('namenode_host') != '')%>
-    HDPNameNode         <%=scope.function_hdp_host("ganglia_server_host")%>  8661
-<%end-%>
-<% if (scope.function_hdp_get_major_stack_version([scope.function_hdp_template_var("stack_version")]) < 2)%>
-    <% if (scope.function_hdp_default('jtnode_host') != '')%>
-    HDPJobTracker     	<%=scope.function_hdp_host("ganglia_server_host")%>  8662
-    <%end-%>
-<%end-%>
-<% if (scope.function_hdp_default('hbase_master_hosts') != '')%>
-    HDPHBaseMaster      <%=scope.function_hdp_host("ganglia_server_host")%>  8663
-<%end-%>
-<% if (scope.function_hdp_get_major_stack_version([scope.function_hdp_template_var("stack_version")]) >= 2)%>
-    <% if (scope.function_hdp_default('rm_host') != '')%>
-    HDPResourceManager  <%=scope.function_hdp_host("ganglia_server_host")%>  8664
-    <%end-%>
-    <% if (scope.function_hdp_default('hs_host') != '')%>
-    HDPHistoryServer    <%=scope.function_hdp_host("ganglia_server_host")%>  8666
-    <%end-%>
-<%end-%>
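
Rendered on an HDP 2.x cluster with NameNode, ResourceManager, and
HistoryServer hosts defined and no HBase, the template above produces a
three-column table like the following (the Ganglia server hostname is
hypothetical):

    #########################################################
    ### ClusterName           GmondMasterHost   GmondPort ###
    #########################################################

        HDPSlaves           ganglia.example.com  8660
        HDPNameNode         ganglia.example.com  8661
        HDPResourceManager  ganglia.example.com  8664
        HDPHistoryServer    ganglia.example.com  8666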

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaEnv.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaEnv.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaEnv.sh.erb
deleted file mode 100644
index 4be541d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaEnv.sh.erb
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Unix users and groups for the binaries we start up.
-GMETAD_USER=<%=scope.function_hdp_template_var("gmetad_user")%>;
-GMOND_USER=<%=scope.function_hdp_template_var("gmond_user")%>;
-WEBSERVER_GROUP=<%=scope.function_hdp_template_var("webserver_group")%>;

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb
deleted file mode 100644
index f129e37..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-GANGLIA_CONF_DIR=<%=scope.function_hdp_template_var("ganglia_conf_dir")%>;
-GANGLIA_RUNTIME_DIR=<%=scope.function_hdp_template_var("ganglia_runtime_dir")%>;
-RRDCACHED_BASE_DIR=<%=scope.function_hdp_template_var("rrdcached_base_dir")%>;
-
-# This file contains all the info about each Ganglia Cluster in our Grid.
-GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
-
-function createDirectory()
-{
-    directoryPath=${1};
-
-    if [ "x" != "x${directoryPath}" ]
-    then
-        mkdir -p ${directoryPath};
-    fi
-}
-
-function getGangliaClusterInfo()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    else
-        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    fi
-}
-
-function getConfiguredGangliaClusterNames()
-{
-  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
-  # the subdirectory name from each.
-  if [ -e ${GANGLIA_CONF_DIR} ]
-  then  
-    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
-  fi
-}
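
The awk pipelines in getGangliaClusterInfo() amount to a filtered read of
gangliaClusters.conf. An equivalent Python sketch (a hypothetical helper,
assuming the same file format: non-comment rows with the cluster name in the
first column):

    def get_ganglia_cluster_info(conf_file, cluster_name=None):
        # Return all non-comment, non-empty rows; with cluster_name set,
        # return only the row whose first column matches, as awk did.
        rows = []
        with open(conf_file) as f:
            for line in f:
                fields = line.split()
                if not fields or fields[0].startswith('#'):
                    continue
                if cluster_name is None or fields[0] == cluster_name:
                    rows.append(fields)  # [name, gmond_master_host, gmond_port]
        return rows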

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkForFormat.sh b/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-
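
The heart of checkForFormat.sh is the guard that refuses to format when any
configured NameNode directory already has content. The same guard in Python
(a sketch under the same inputs; the hadoop invocation and su call are copied
from the script above, not from its replacement):

    import os
    import subprocess

    def check_for_format(hdfs_user, conf_dir, mark_dir, name_dirs):
        if os.path.isdir(mark_dir):
            print("%s exists. NameNode DFS already formatted" % mark_dir)
            return 0
        # Any name dir with entries blocks the format, as the ls|wc|grep
        # pipeline did; the count of offenders becomes the exit code.
        non_empty = [d for d in name_dirs
                     if os.path.isdir(d) and os.listdir(d)]
        if non_empty:
            print("ERROR: non-empty NameNode dirs: %s" % " ".join(non_empty))
            return len(non_empty)
        cmd = "yes Y | hadoop --config %s namenode -format" % conf_dir
        return subprocess.call(["su", "-", hdfs_user, "-c", cmd])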

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkWebUI.py b/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkWebUI.py
deleted file mode 100644
index f8e9c1a..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkWebUI.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import httplib
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
-  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
-
-  (options, args) = parser.parse_args()
-  
-  hosts = options.hosts.split(',')
-  port = options.port
-
-  for host in hosts:
-    try:
-      conn = httplib.HTTPConnection(host, port)
-      # This can be modified to get a partial url part to be sent with request
-      conn.request("GET", "/")
-      httpCode = conn.getresponse().status
-      conn.close()
-    except Exception:
-      httpCode = 404
-
-    if httpCode != 200:
-      print "Cannot access WEB UI on: http://" + host + ":" + port
-      exit(1)
-      
-
-if __name__ == "__main__":
-  main()
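
checkWebUI.py exits non-zero on the first host whose root URL fails to return
HTTP 200, folding connection errors into the same failure path by treating
them as a 404. A typical invocation (hostnames hypothetical):

    python checkWebUI.py -m master1.example.com,master2.example.com -p 50070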


http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/config.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/config.pp b/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/config.pp
deleted file mode 100644
index c527e1f..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/config.pp
+++ /dev/null
@@ -1,86 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios::server::config()
-{
-
-  $host_cfg = $hdp-nagios::params::nagios_host_cfg
-  $nagios_lookup_daemon_str = $hdp::params::nagios_lookup_daemon_strs[$hdp::params::hdp_os_type]
-  
-  hdp-nagios::server::configfile { 'nagios.cfg': conf_dir => $hdp-nagios::params::conf_dir, group => $hdp-nagios::params::nagios_group }
-  hdp-nagios::server::configfile { 'resource.cfg': conf_dir => $hdp-nagios::params::conf_dir, group => $hdp-nagios::params::nagios_group }
-  hdp-nagios::server::configfile { 'hadoop-hosts.cfg': }
-  hdp-nagios::server::configfile { 'hadoop-hostgroups.cfg': }
-  hdp-nagios::server::configfile { 'hadoop-servicegroups.cfg': }
-  hdp-nagios::server::configfile { 'hadoop-services.cfg': }
-  hdp-nagios::server::configfile { 'hadoop-commands.cfg': }
-  hdp-nagios::server::configfile { 'contacts.cfg': }
-  if ($hdp::params::hdp_os_type in ['centos5', 'centos6', 'redhat5', 'redhat6', 'oraclelinux5', 'oraclelinux6']) {
-    hdp-nagios::server::configfile { 'nagios': conf_dir => '/etc/init.d/', mode => '0755', owner => 'root', group => 'root'}
-  }
-
-  hdp-nagios::server::check { 'check_cpu.pl': }
-  hdp-nagios::server::check { 'check_datanode_storage.php': }
-  hdp-nagios::server::check { 'check_aggregate.php': }
-  hdp-nagios::server::check { 'check_hdfs_blocks.php': }
-  hdp-nagios::server::check { 'check_hdfs_capacity.php': }
-  hdp-nagios::server::check { 'check_rpcq_latency.php': }
-  hdp-nagios::server::check { 'check_webui.sh': }
-  hdp-nagios::server::check { 'check_name_dir_status.php': }
-  hdp-nagios::server::check { 'check_oozie_status.sh': }
-  hdp-nagios::server::check { 'check_templeton_status.sh': }
-  hdp-nagios::server::check { 'check_hive_metastore_status.sh': }
-  hdp-nagios::server::check { 'check_hue_status.sh': }
-  hdp-nagios::server::check { 'check_mapred_local_dir_used.sh': }
-  hdp-nagios::server::check { 'check_nodemanager_health.sh': }
-  hdp-nagios::server::check { 'check_namenodes_ha.sh': }
-  hdp-nagios::server::check { 'hdp_nagios_init.php': }
-
-  anchor{'hdp-nagios::server::config::begin':} -> Hdp-nagios::Server::Configfile<||> -> anchor{'hdp-nagios::server::config::end':}
-  Anchor['hdp-nagios::server::config::begin'] -> Hdp-nagios::Server::Check<||> -> Anchor['hdp-nagios::server::config::end']
-}
-
-
-###config file helper
-define hdp-nagios::server::configfile(
-  $owner = $hdp-nagios::params::nagios_user,
-  $group = $hdp::params::user_group,
-  $conf_dir = $hdp-nagios::params::nagios_obj_dir,
-  $mode = undef
-) 
-{
-  
-  hdp::configfile { "${conf_dir}/${name}":
-    component      => 'nagios',
-    owner          => $owner,
-    group          => $group,
-    mode           => $mode
-  }
-
-  
-}
-
-define hdp-nagios::server::check()
-{
-  file { "${hdp-nagios::params::plugins_dir}/${name}":
-    source => "puppet:///modules/hdp-nagios/${name}", 
-    mode => '0755'
-  }
-}
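
Each hdp-nagios::server::check resource simply drops a plugin file into the
plugins directory with mode 0755. A plain-Python sketch of that file resource
(the default plugins path is hypothetical, not read from hdp-nagios::params):

    import os
    import shutil

    def install_check(name, source_dir,
                      plugins_dir='/usr/lib64/nagios/plugins'):
        # Copy the plugin into place and make it executable, as the
        # file resource with mode => '0755' did.
        dest = os.path.join(plugins_dir, name)
        shutil.copy(os.path.join(source_dir, name), dest)
        os.chmod(dest, 0o755)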

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/packages.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/packages.pp b/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/packages.pp
deleted file mode 100644
index 03d241d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/packages.pp
+++ /dev/null
@@ -1,110 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios::server::packages(
-  $service_state = $hdp::params::cluster_service_state
-)
-{
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['uninstalled']) {
-     hdp-nagios::server::package { ['nagios-server','nagios-fping','nagios-plugins','nagios-addons']:
-      ensure => 'uninstalled'
-    }
-  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
-
-  hdp::package { 'perl':
-    ensure      => present,
-    java_needed => false
-  }
-
-  hdp::package { 'perl-Net-SNMP':
-    ensure      => present,
-    java_needed => false
-  }
-
-  hdp::package { 'nagios-plugins': 
-    ensure      => present,
-    java_needed => false
-  }
-  
-  hdp::package { 'nagios-server':
-    ensure      => present,
-    java_needed => false
-  }
-
-  hdp::package { 'nagios-devel': 
-    ensure      => present,
-    java_needed => false
-  }
-  
-  hdp::package { 'nagios-fping': 
-    ensure      => present,
-    java_needed => false
-  }
-  
-  hdp::package { 'nagios-addons': 
-    ensure      => present,
-    java_needed => false
-  }
-  
-  hdp::package { 'nagios-php-pecl-json': 
-    ensure      => present,
-    java_needed => false
-  }
-  
-  
-debug("## state: $service_state")
-  if ($service_state == 'installed_and_configured') {
-    
-    hdp::package::remove_pkg { 'hdp_mon_nagios_addons':
-      package_type => 'hdp_mon_nagios_addons'
-    }
-
-    hdp::package::remove_pkg { 'nagios-plugins':
-      package_type => 'nagios-plugins'
-    }
-
-    exec { "remove_package nagios":
-      path    => "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-      command => "rpm -e --allmatches --nopostun nagios ; true"
-    }
-
-    debug("##Adding removing dep")
-    # Removing conflicting packages. Names of packages being removed are hardcoded and not resolved via hdp::params
-    Hdp::Package::Remove_pkg['hdp_mon_nagios_addons'] -> Hdp::Package::Remove_pkg['nagios-plugins'] -> Exec['remove_package nagios'] -> Hdp::Package['nagios-plugins']
-  }
-
-  Hdp::Package['nagios-plugins'] -> Hdp::Package['nagios-server'] -> Hdp::Package['nagios-devel'] -> Hdp::Package['nagios-fping'] -> Hdp::Package['nagios-addons'] -> Hdp::Package['nagios-php-pecl-json']
-    
-
-} 
-
-}
-
-
-define hdp-nagios::server::package(
-  $ensure = present
-)
-{
-  hdp::package { $name: 
-    ensure      => $ensure,
-    java_needed => false
-  }
-}
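
The ordering arrows at the bottom of packages.pp encode a strict sequence:
the conflicting hdp_mon_nagios_addons and nagios-plugins packages are
removed, nagios is force-erased, and only then do the installs run. A Python
sketch of that sequencing (package names come from the manifest; the yum
calls are illustrative, not Ambari's actual replacement):

    import subprocess

    def reinstall_nagios_stack():
        # Removal must precede installation, mirroring the -> chains above.
        for pkg in ('hdp_mon_nagios_addons', 'nagios-plugins'):
            subprocess.call(['yum', '-y', 'remove', pkg])
        # Same forced erase the exec resource performed; failure tolerated.
        subprocess.call('rpm -e --allmatches --nopostun nagios ; true',
                        shell=True)
        for pkg in ('nagios-plugins', 'nagios-server', 'nagios-devel',
                    'nagios-fping', 'nagios-addons', 'nagios-php-pecl-json'):
            subprocess.call(['yum', '-y', 'install', pkg])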

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/target.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/target.pp b/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/target.pp
deleted file mode 100644
index 4de784f..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/target.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios::target(){}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/contacts.cfg.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/contacts.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/contacts.cfg.erb
deleted file mode 100644
index ee6f09e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/contacts.cfg.erb
+++ /dev/null
@@ -1,91 +0,0 @@
-###############################################################################
-# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
-#
-# Last Modified: 05-31-2007
-#
-# NOTES: This config file provides you with some example contact and contact
-#        group definitions that you can reference in host and service
-#        definitions.
-#       
-#        You don't need to keep these definitions in a separate file from your
-#        other object definitions.  This has been done just to make things
-#        easier to understand.
-#
-###############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-
-###############################################################################
-###############################################################################
-#
-# CONTACTS
-#
-###############################################################################
-###############################################################################
-
-# Just one contact defined by default - the Nagios admin (that's you)
-# This contact definition inherits a lot of default values from the 'generic-contact' 
-# template which is defined elsewhere.
-
-define contact{
-        contact_name    <%=scope.function_hdp_template_var("nagios_web_login")%>    ; Short name of user
-        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
-        alias           Nagios Admin                                                ; Full name of user
-
-        email                           <%=scope.function_hdp_template_var("nagios_contact")%>	; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
-        }
-
-# Contact which writes all Nagios alerts to the system logger.
-define contact{
-        contact_name                    sys_logger         ; Short name of user
-        use                             generic-contact    ; Inherit default values from generic-contact template (defined above)
-        alias                           System Logger      ; Full name of user
-        host_notifications_enabled      1
-        service_notifications_enabled   1
-        service_notification_period     24x7
-        host_notification_period        24x7
-        service_notification_options    w,u,c,r,s
-        host_notification_options       d,u,r,s
-        can_submit_commands             1
-        retain_status_information       1
-        service_notification_commands   service_sys_logger
-        host_notification_commands      host_sys_logger
-        }
-
-###############################################################################
-###############################################################################
-#
-# CONTACT GROUPS
-#
-###############################################################################
-###############################################################################
-
-# We only have one contact in this simple configuration file, so there is
-# no need to create more than one contact group.
-
-define contactgroup {
-        contactgroup_name       admins
-        alias                   Nagios Administrators
-        members                 <%=scope.function_hdp_template_var("nagios_web_login")%>,sys_logger
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb
deleted file mode 100644
index e49199b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb
+++ /dev/null
@@ -1,114 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-<% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
-# 'check_cpu' checks remote CPU load
-define command {
-        command_name    check_cpu
-        command_line    $USER1$/check_cpu.pl -H $HOSTADDRESS$ -C hadoop -w $ARG1$ -c $ARG2$
-       }
-<% end %>
-
-# Check whether DataNode storage is full
-define command {
-        command_name    check_datanode_storage
-        command_line    php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
-       }
-
-define command{
-        command_name    check_hdfs_blocks
-        command_line    php $USER1$/check_hdfs_blocks.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -s $ARG5$ -e $ARG6$ -k $ARG7$ -r $ARG8$ -t $ARG9$ -u $ARG10$
-       }
-
-define command{
-        command_name    check_hdfs_capacity
-        command_line    php $USER1$/check_hdfs_capacity.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_aggregate
-        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
-       }
-
-define command{
-        command_name    check_rpcq_latency
-        command_line    php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_nagios
-        command_line    $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
-       }
-
-define command{
-        command_name    check_webui
-        command_line    $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$
-       }
-
-define command{
-        command_name    check_name_dir_status
-        command_line    php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$ -e $ARG2$ -k $ARG3$ -r $ARG4$ -t $ARG5$ -s $ARG6$
-       }
-
-define command{
-        command_name    check_oozie_status
-        command_line    $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_templeton_status
-        command_line    $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_hive_metastore_status
-        command_line    $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-define command{
-        command_name    check_hue_status
-        command_line    $USER1$/check_hue_status.sh
-       }
-
-define command{
-       command_name    check_mapred_local_dir_used_space
-       command_line    $USER1$/check_mapred_local_dir_used.sh $ARG1$ $ARG2$
-       }
-
-define command{
-       command_name    check_namenodes_ha
-       command_line    $USER1$/check_namenodes_ha.sh $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name    check_nodemanager_health
-        command_line    $USER1$/check_nodemanager_health.sh $HOSTADDRESS$ $ARG1$
-       }
-
-define command{
-        command_name    host_sys_logger
-        command_line    $USER1$/sys_logger.py $HOSTSTATETYPE$ $HOSTATTEMPT$ $HOSTSTATE$ "Host::Ping" "Event Host=$HOSTADDRESS$($HOSTSTATE$), $HOSTOUTPUT$ $LONGHOSTOUTPUT$"
-       }
-
-define command{
-        command_name    service_sys_logger
-        command_line    $USER1$/sys_logger.py $SERVICESTATETYPE$ $SERVICEATTEMPT$ $SERVICESTATE$ "$SERVICEDESC$" "Event Host=$HOSTADDRESS$ Service Description=$SERVICEDESC$($SERVICESTATE$), $SERVICEOUTPUT$ $LONGSERVICEOUTPUT$"
-       }
\ No newline at end of file
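
In these definitions $USER1$ is typically the plugins directory configured in
resource.cfg, and $ARG1$..$ARGn$ are filled from the !-separated
check_command of the service that references the command. For example, a
service line such as (values hypothetical):

    check_command    check_webui!namenode!50070

expands check_webui's command_line to:

    /usr/lib64/nagios/plugins/check_webui.sh namenode <host address> 50070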

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hostgroups.cfg.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hostgroups.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hostgroups.cfg.erb
deleted file mode 100644
index 9bac137..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hostgroups.cfg.erb
+++ /dev/null
@@ -1,20 +0,0 @@
-<% all_hosts = Array.new -%>
-<%scope.function_hdp_template_var("hostgroup_defs").each do |name,info|-%>
-<%members = scope.function_hdp_host(info['host_member_info'])-%>
-<%unless scope.function_hdp_is_empty(members) -%>
-<% all_hosts += [members].flatten-%>
-define hostgroup {
-        hostgroup_name  <%=name%>
-        alias           <%=name%>
-        members         <%=[members].flatten.join(',')%>
-}
-
-<%end-%>
-<%end%>
-<%unless all_hosts.empty?-%>
-define hostgroup {
-        hostgroup_name  all-servers
-        alias           All Servers
-        members         <%=all_hosts.uniq.join(',')%>
-}
-<%end%>
\ No newline at end of file
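
The hostgroups template accumulates every member it emits so the trailing
all-servers group can union them. The same accumulation in Python (a sketch;
the shape of hostgroup_defs and the member-resolution callback are inferred
from the template, not taken from real Ambari code):

    def build_hostgroups(hostgroup_defs, resolve_members):
        groups, all_hosts = {}, []
        for name, info in hostgroup_defs.items():
            members = resolve_members(info['host_member_info'])
            if members:
                groups[name] = list(members)
                all_hosts.extend(members)
        if all_hosts:
            # Deduplicate in first-seen order, like all_hosts.uniq.
            seen = []
            for host in all_hosts:
                if host not in seen:
                    seen.append(host)
            groups['all-servers'] = seen
        return groups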

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hosts.cfg.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hosts.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hosts.cfg.erb
deleted file mode 100644
index 4e97548..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hosts.cfg.erb
+++ /dev/null
@@ -1,16 +0,0 @@
-<%scope.function_hdp_nagios_all_hosts().each do |host|-%>
-define host {
-        alias        <%=host%>
-        host_name    <%=host%>
-        use          linux-server
-        address      <%=host%>
-        check_interval         0.25
-        retry_interval         0.25
-        max_check_attempts     4
-        notifications_enabled     1
-        first_notification_delay  0     # Send notification soon after change in the hard state
-        notification_interval     0     # Send the notification once
-        notification_options      d,u,r
-}
-
-<%end%>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-servicegroups.cfg.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-servicegroups.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-servicegroups.cfg.erb
deleted file mode 100644
index eb382bc..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-servicegroups.cfg.erb
+++ /dev/null
@@ -1,80 +0,0 @@
-<%if scope.function_hdp_nagios_members_exist('namenode') ||
-  scope.function_hdp_nagios_members_exist('snamenode')  ||
-  scope.function_hdp_nagios_members_exist('slaves')-%>
-define servicegroup {
-  servicegroup_name  HDFS
-  alias  HDFS Checks
-}
-<% end -%>
-<%if scope.function_hdp_nagios_members_exist('jobtracker') ||
-  scope.function_hdp_nagios_members_exist('historyserver2')-%>
-define servicegroup {
-  servicegroup_name  MAPREDUCE
-  alias  MAPREDUCE Checks
-}
-<% end -%>
-<%if scope.function_hdp_nagios_members_exist('resourcemanager') ||
-  scope.function_hdp_nagios_members_exist('nodemanagers') -%>
-define servicegroup {
-  servicegroup_name  YARN
-  alias  YARN Checks
-}
-<% end -%>
-<%if scope.function_hdp_nagios_members_exist('flume-servers')-%>
-define servicegroup {
-  servicegroup_name  FLUME
-  alias  FLUME Checks
-}
-<% end -%>
-<%if scope.function_hdp_nagios_members_exist('hbasemasters')-%>
-define servicegroup {
-  servicegroup_name  HBASE
-  alias  HBASE Checks
-}
-<% end -%>
-<%if scope.function_hdp_nagios_members_exist('oozie-server')-%>
-define servicegroup {
-  servicegroup_name  OOZIE
-  alias  OOZIE Checks
-}
-<% end -%>
-<%if scope.function_hdp_nagios_members_exist('webhcat-server')-%>
-define servicegroup {
-  servicegroup_name  WEBHCAT
-  alias  WEBHCAT Checks
-}
-<% end -%>
-<%if scope.function_hdp_nagios_members_exist('nagios-server')-%>
-define servicegroup {
-  servicegroup_name  NAGIOS
-  alias  NAGIOS Checks
-}
-<% end -%>
-<%if scope.function_hdp_nagios_members_exist('ganglia-server')-%>
-define servicegroup {
-  servicegroup_name  GANGLIA
-  alias  GANGLIA Checks
-}
-<% end -%>
-<%if scope.function_hdp_nagios_members_exist('hiveserver')-%>
-define servicegroup {
-  servicegroup_name  HIVE-METASTORE
-  alias  HIVE-METASTORE Checks
-}
-<% end -%>
-<%if scope.function_hdp_nagios_members_exist('zookeeper-servers')-%>
-define servicegroup {
-  servicegroup_name  ZOOKEEPER
-  alias  ZOOKEEPER Checks
-}
-<% end -%>
-define servicegroup {
-  servicegroup_name  AMBARI
-  alias  AMBARI Checks
-}
-<%if scope.function_hdp_nagios_members_exist('hue-server')-%>
-define servicegroup {
-  servicegroup_name  HUE
-  alias  HUE Checks
-}
-<% end -%>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb
deleted file mode 100644
index 6e8acd6..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb
+++ /dev/null
@@ -1,753 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# NAGIOS SERVER Check (status log update)
-<%if scope.function_hdp_nagios_members_exist('nagios-server')-%>
-define service {
-        name                            hadoop-service
-        use                             generic-service
-        notification_options            w,u,c,r,f,s
-        first_notification_delay        0
-        notification_interval           0                 # Send the notification once
-        contact_groups                  admins
-        notifications_enabled           1
-        event_handler_enabled           1
-        register                        0
-}
-
-define service {        
-        hostgroup_name          nagios-server        
-        use                     hadoop-service
-        service_description     NAGIOS::Nagios status log freshness
-        servicegroups           NAGIOS
-        check_command           check_nagios!10!/var/nagios/status.dat!<%=scope.function_hdp_template_var("::hdp-nagios::server::config::nagios_lookup_daemon_str")%>
-        normal_check_interval   5
-        retry_check_interval    0.5
-        max_check_attempts      2
-}
-
-<%if scope.function_hdp_nagios_members_exist('namenode')-%>
-# NAGIOS SERVER HDFS Checks
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes with space available
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode space"!10%!30%
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-<%end-%> #end HDFS if
-<% if scope.function_hdp_nagios_members_exist('namenode') &&
-        (scope.function_hdp_get_major_stack_version([scope.function_hdp_template_var("stack_version")]) >= 2) &&
-        (scope.function_hdp_template_var("::hdp::params::dfs_ha_enabled"))%>
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::NameNode HA Healthy
-        servicegroups           HDFS
-        check_command           check_namenodes_ha!$HOSTGROUPMEMBERS:namenode$!<%=scope.function_hdp_template_var("::hdp::namenode_port")%>
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      5
-}
-<%end-%> #end NameNode HA if
-
-
-# AMBARI AGENT Checks
-<%scope.function_hdp_template_var("all_hosts").each_with_index do |hostname, index|-%>
-define service {
-        host_name	        <%=hostname%>
-        use                     hadoop-service
-        service_description     AMBARI::Ambari Agent process
-        servicegroups           AMBARI
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::all_ping_ports")[index]%>!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-<%end-%> #end do loop
-
-# NAGIOS SERVER ZOOKEEPER Checks
-<%if scope.function_hdp_nagios_members_exist('zookeeper-servers')-%>
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     ZOOKEEPER::Percent ZooKeeper Servers live
-        servicegroups           ZOOKEEPER
-        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process"!35%!70%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-<%end-%>
-
-# NAGIOS SERVER HBASE Checks
-<%if scope.function_hdp_nagios_members_exist('hbasemasters')-%>
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASE::Percent RegionServers live
-        servicegroups           HBASE
-        check_command           check_aggregate!"REGIONSERVER::RegionServer process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-<%end-%>
-<%end-%> #end if nagios-server
-
-
-
-# GANGLIA SERVER Checks
-<%if scope.function_hdp_nagios_members_exist('ganglia-server')-%>
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Server process
-        servicegroups           GANGLIA
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::ganglia_port")%>!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for Slaves
-        servicegroups           GANGLIA
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::ganglia_collector_slaves_port")%>!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for NameNode
-        servicegroups           GANGLIA
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::ganglia_collector_namenode_port")%>!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-<%if scope.function_hdp_nagios_members_exist('jobtracker')-%>
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for JobTracker
-        servicegroups           GANGLIA
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::ganglia_collector_jobtracker_port")%>!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-<%end-%>
-
-<%if scope.function_hdp_nagios_members_exist('hbasemasters')-%>
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HBase Master
-        servicegroups           GANGLIA
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::ganglia_collector_hbase_port")%>!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-<%end-%>
-
-<%if scope.function_hdp_nagios_members_exist('resourcemanager')-%>
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for ResourceManager
-        servicegroups           GANGLIA
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::ganglia_collector_rm_port")%>!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-<%end-%>
-
-<%if scope.function_hdp_nagios_members_exist('historyserver2')-%>
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HistoryServer
-        servicegroups           GANGLIA
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::ganglia_collector_hs_port")%>!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-<%end-%>
-
-<%end-%> #end if ganglia
-
-<%if scope.function_hdp_nagios_members_exist('snamenode')-%>
-# Secondary namenode checks
-define service {
-        hostgroup_name          snamenode
-        use                     hadoop-service
-        service_description     NAMENODE::Secondary NameNode process
-        servicegroups           HDFS
-        check_command           check_tcp!<%=scope.function_hdp_template_var("snamenode_port")%>!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-<%end-%>
-
-<%if scope.function_hdp_nagios_members_exist('namenode')-%>
-# HDFS Checks
-<% @namenodes = scope.function_hdp_template_var("::hdp::params::namenode_host"); @namenodes.each do |namenode| -%>
-
-define service {
-        host_name               <%= namenode %>
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode edit logs directory status on <%= namenode %>
-        servicegroups           HDFS
-        check_command           check_name_dir_status!<%=scope.function_hdp_template_var("::hdp::namenode_port")%>!<%=scope.function_hdp_template_var("::hdp::params::hadoop_ssl_enabled")%>!<%=scope.function_hdp_template_var("nagios_keytab_path")%>!<%=scope.function_hdp_template_var("nagios_principal_name")%>!<%=scope.function_hdp_template_var("::hdp::params::kinit_path_local")%>!<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-<% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
-define service {
-        host_name               <%= namenode %>
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode host CPU utilization on <%= namenode %>
-        servicegroups           HDFS
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2
-        max_check_attempts      5
-}
-<% end %> #end os type
-
-define service {
-        host_name               <%= namenode %>
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode Web UI on <%= namenode %>
-        servicegroups           HDFS
-        check_command           check_webui!namenode!<%=scope.function_hdp_template_var("::hdp::namenode_port")%>
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        host_name               <%= namenode %>
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode process on <%= namenode %>
-        servicegroups           HDFS
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::namenode_metadata_port")%>!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        host_name               <%= namenode %>
-        use                     hadoop-service
-        service_description     HDFS::NameNode RPC latency on <%= namenode %>
-        servicegroups           HDFS
-        check_command           check_rpcq_latency!NameNode!<%=scope.function_hdp_template_var("::hdp::namenode_port")%>!3000!5000!<%=scope.function_hdp_template_var("::hdp::params::hadoop_ssl_enabled")%>!<%=scope.function_hdp_template_var("nagios_keytab_path")%>!<%=scope.function_hdp_template_var("nagios_principal_name")%>!<%=scope.function_hdp_template_var("::hdp::params::kinit_path_local")%>!<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      5
-}
-
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Blocks health
-        servicegroups           HDFS
-        check_command           check_hdfs_blocks!$HOSTGROUPMEMBERS:namenode$!<%=scope.function_hdp_template_var("::hdp::namenode_port")%>!0%!0%!<%=scope.function_hdp_template_var("::hdp-nagios::params::nn_metrics_property")%>!<%=scope.function_hdp_template_var("::hdp::params::hadoop_ssl_enabled")%>!<%=scope.function_hdp_template_var("nagios_keytab_path")%>!<%=scope.function_hdp_template_var("nagios_principal_name")%>!<%=scope.function_hdp_template_var("::hdp::params::kinit_path_local")%>!<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::HDFS capacity utilization
-        servicegroups           HDFS
-        check_command           check_hdfs_capacity!$HOSTGROUPMEMBERS:namenode$!<%=scope.function_hdp_template_var("::hdp::namenode_port")%>!80%!90%!<%=scope.function_hdp_template_var("::hdp::params::hadoop_ssl_enabled")%>!<%=scope.function_hdp_template_var("nagios_keytab_path")%>!<%=scope.function_hdp_template_var("nagios_principal_name")%>!<%=scope.function_hdp_template_var("::hdp::params::kinit_path_local")%>!<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>
-        normal_check_interval   10
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-<% end -%> #end do loop
-<% end -%> #end if HDFS (namenode)
-
-# MAPREDUCE Checks
-<%if scope.function_hdp_nagios_members_exist('jobtracker')-%>
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!jobtracker!<%=scope.function_hdp_template_var("::hdp::jtnode_port")%>
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::HistoryServer Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!jobhistory!<%=scope.function_hdp_template_var("::hdp::jobhistory_port")%>
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-<% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker CPU utilization
-        servicegroups           MAPREDUCE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-<% end %>
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::jtnode_port")%>!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     MAPREDUCE::JobTracker RPC latency
-        servicegroups           MAPREDUCE
-        check_command           check_rpcq_latency!JobTracker!<%=scope.function_hdp_template_var("::hdp::jtnode_port")%>!3000!5000!<%=scope.function_hdp_template_var("::hdp::params::hadoop_ssl_enabled")%>!<%=scope.function_hdp_template_var("nagios_keytab_path")%>!<%=scope.function_hdp_template_var("nagios_principal_name")%>!<%=scope.function_hdp_template_var("::hdp::params::kinit_path_local")%>!<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-<%end-%> #end if MapReduce
-
-<%if scope.function_hdp_nagios_members_exist('tasktracker-servers')-%>
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     MAPREDUCE::Percent TaskTrackers live
-        servicegroups           MAPREDUCE
-        check_command           check_aggregate!"TASKTRACKER::TaskTracker process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-# MAPREDUCE::TASKTRACKER Checks 
-define service {
-        hostgroup_name          tasktracker-servers
-        use                     hadoop-service
-        service_description     TASKTRACKER::TaskTracker process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::tasktracker_port")%>!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-# MAPREDUCE::TASKTRACKER Mapreduce local dir used space
-define service {
-        hostgroup_name          tasktracker-servers
-        use                     hadoop-service
-        service_description     TASKTRACKER::MapReduce local dir space
-        servicegroups           MAPREDUCE
-        check_command           check_mapred_local_dir_used_space!<%=scope.function_hdp_default("::hdp::mapred-site/mapred.local.dir")%>!85%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-<%end-%>
-
-
-<%if scope.function_hdp_nagios_members_exist('resourcemanager')-%>
-# YARN::RESOURCEMANAGER Checks 
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager Web UI
-        servicegroups           YARN
-        check_command           check_webui!resourcemanager!<%=scope.function_hdp_template_var("::hdp::rm_port")%>
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-<% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager CPU utilization
-        servicegroups           YARN
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-<% end %>
-
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager RPC latency
-        servicegroups           YARN
-        check_command           check_rpcq_latency!ResourceManager!<%=scope.function_hdp_template_var("::hdp::rm_port")%>!3000!5000!<%=scope.function_hdp_template_var("::hdp::params::hadoop_ssl_enabled")%>!<%=scope.function_hdp_template_var("nagios_keytab_path")%>!<%=scope.function_hdp_template_var("nagios_principal_name")%>!<%=scope.function_hdp_template_var("::hdp::params::kinit_path_local")%>!<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager process
-        servicegroups           YARN
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::rm_port")%>!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-<% end %>
-
-<%if scope.function_hdp_nagios_members_exist('nodemanagers')-%>
-# YARN::NODEMANAGER Checks
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager process
-        servicegroups           YARN
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::nm_port")%>!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager health
-        servicegroups           YARN
-        check_command           check_nodemanager_health!<%=scope.function_hdp_template_var("::hdp::nm_port")%>!<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>!<%=scope.function_hdp_template_var("nagios_keytab_path")%>!<%=scope.function_hdp_template_var("nagios_principal_name")%>!<%=scope.function_hdp_template_var("kinit_path_local")%>
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     NODEMANAGER::Percent NodeManagers live
-        servicegroups           YARN
-        check_command           check_aggregate!"NODEMANAGER::NodeManager process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-<% end %>
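
Several of the checks above are bare TCP connects (check_tcp!<port>!-w 1 -c 1): the service counts as up if the port accepts a connection quickly. A minimal Python sketch of that probe, ignoring the separate warn/crit timing thresholds:

    import socket

    def tcp_check(host, port, timeout=1.0):
        # Succeed iff a TCP connection to host:port opens within the
        # timeout, which is the essence of check_tcp with -w 1 -c 1.
        try:
            socket.create_connection((host, port), timeout).close()
            return True
        except OSError:
            return False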
-
-<%if scope.function_hdp_nagios_members_exist('historyserver2')-%>
-# MAPREDUCE::JOBHISTORY Checks
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!historyserver2!<%=scope.function_hdp_template_var("::hdp::hs_port")%>
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-<% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer CPU utilization
-        servicegroups           MAPREDUCE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-<% end %>
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer RPC latency
-        servicegroups           MAPREDUCE
-        check_command           check_rpcq_latency!JobHistoryServer!<%=scope.function_hdp_template_var("::hdp::hs_port")%>!3000!5000!<%=scope.function_hdp_template_var("::hdp::params::hadoop_ssl_enabled")%>!<%=scope.function_hdp_template_var("nagios_keytab_path")%>!<%=scope.function_hdp_template_var("nagios_principal_name")%>!<%=scope.function_hdp_template_var("::hdp::params::kinit_path_local")%>!<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::hs_port")%>!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-<% end %>
-
-<%if scope.function_hdp_nagios_members_exist('journalnodes')-%>
-# Journalnode checks
-define service {
-        hostgroup_name          journalnodes
-        use                     hadoop-service
-        service_description     JOURNALNODE::JournalNode process
-        servicegroups           HDFS
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::journalnode_port")%>!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-<%if scope.function_hdp_template_var("::hdp::params::dfs_ha_enabled")-%>
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent JournalNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"JOURNALNODE::JournalNode process"!33%!50%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-<%end-%>
-<%end-%>
-
-<%if scope.function_hdp_nagios_members_exist('slaves')-%>
-# HDFS::DATANODE Checks
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode process
-        servicegroups           HDFS
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::datanode_port")%>!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode space
-        servicegroups           HDFS
-        check_command           check_datanode_storage!<%=scope.function_hdp_template_var("::hdp::datanode_port")%>!90%!90%!<%=scope.function_hdp_template_var("::hdp::params::hadoop_ssl_enabled")%>!<%=scope.function_hdp_template_var("nagios_keytab_path")%>!<%=scope.function_hdp_template_var("nagios_principal_name")%>!<%=scope.function_hdp_template_var("::hdp::params::kinit_path_local")%>!<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      2
-}
-
-<%end-%>
-
-<%if scope.function_hdp_nagios_members_exist('flume-servers')-%>
-# FLUME Checks
-define service {
-        hostgroup_name          flume-servers
-        use                     hadoop-service
-        service_description     FLUME::Flume Agent process
-        servicegroups           FLUME
-        check_command           check_tcp!<%=scope.function_hdp_template_var("flume_port")%>!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-<%end-%>
-
-
-<%if scope.function_hdp_nagios_members_exist('zookeeper-servers')-%>
-# ZOOKEEPER Checks
-define service {
-        hostgroup_name          zookeeper-servers
-        use                     hadoop-service
-        service_description     ZOOKEEPER::ZooKeeper Server process
-        servicegroups           ZOOKEEPER
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::clientPort")%>!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-<%end-%>
-
-<%if scope.function_hdp_nagios_members_exist('hbasemasters')-%>
-# HBASE::REGIONSERVER Checks
-define service {
-        hostgroup_name          region-servers
-        use                     hadoop-service
-        service_description     REGIONSERVER::RegionServer process
-        servicegroups           HBASE
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::hbase_rs_port")%>!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-# HBASE::MASTER Checks
-# define service {
-#         hostgroup_name          hbasemasters
-#         use                     hadoop-service
-#         service_description     HBASEMASTER::HBase Master Web UI
-#         servicegroups           HBASE
-#         check_command           check_webui!hbase!<%=scope.function_hdp_template_var("::hdp::hbase_master_port")%>
-#         normal_check_interval   1
-#         retry_check_interval    1
-#         max_check_attempts      3
-# }
-<% @hbasemasters = scope.function_hdp_template_var("::hdp::params::hbase_master_hosts"); @hbasemasters.each do |hbasemaster| -%>
-<% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
-define service {
-        host_name               <%= hbasemaster %>
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master CPU utilization on <%= hbasemaster %>
-        servicegroups           HBASE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-<% end %> #end if suse
-define service {
-        host_name               <%= hbasemaster %>
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master process on <%= hbasemaster %>
-        servicegroups           HBASE
-        check_command           check_tcp!<%=scope.function_hdp_template_var("::hdp::hbase_master_rpc_port")%>!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-<%end-%> #end do loop
-<%end-%> #end if Hbase
-
-<%if scope.function_hdp_nagios_members_exist('hiveserver')-%>
-# HIVE Metastore check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-METASTORE::Hive Metastore status
-        servicegroups           HIVE-METASTORE
-        <%if scope.function_hdp_template_var("::hdp::params::security_enabled")-%>
-        check_command           check_hive_metastore_status!<%=scope.function_hdp_template_var("::hive_metastore_port")%>!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("nagios_keytab_path")%>!<%=scope.function_hdp_template_var("nagios_principal_name")%>!<%=scope.function_hdp_template_var("kinit_path_local")%>
-        <%else-%>
-        check_command           check_hive_metastore_status!<%=scope.function_hdp_template_var("::hive_metastore_port")%>!<%=scope.function_hdp_template_var("java64_home")%>!false
-        <%end-%>
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-<%end-%>
-<%if scope.function_hdp_nagios_members_exist('oozie-server')-%>
-# Oozie check
-define service {
-        hostgroup_name          oozie-server
-        use                     hadoop-service
-        service_description     OOZIE::Oozie Server status
-        servicegroups           OOZIE
-        <%if scope.function_hdp_template_var("::hdp::params::security_enabled")-%>
-        check_command           check_oozie_status!<%=scope.function_hdp_template_var("::hdp::oozie_server_port")%>!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("nagios_keytab_path")%>!<%=scope.function_hdp_template_var("nagios_principal_name")%>!<%=scope.function_hdp_template_var("kinit_path_local")%>
-        <%else-%>
-        check_command           check_oozie_status!<%=scope.function_hdp_template_var("::hdp::oozie_server_port")%>!<%=scope.function_hdp_template_var("java64_home")%>!false
-        <%end-%>
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-<%end-%>
-<%if scope.function_hdp_nagios_members_exist('webhcat-server')-%>
-# WEBHCAT check
-define service {
-        hostgroup_name          webhcat-server
-        use                     hadoop-service
-        service_description     WEBHCAT::WebHCat Server status
-        servicegroups           WEBHCAT 
-        <%if scope.function_hdp_template_var("::hdp::params::security_enabled")-%>
-        check_command           check_templeton_status!<%=scope.function_hdp_template_var("::hdp::templeton_port")%>!v1!<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>!<%=scope.function_hdp_template_var("nagios_keytab_path")%>!<%=scope.function_hdp_template_var("nagios_principal_name")%>!<%=scope.function_hdp_template_var("kinit_path_local")%>
-        <%else-%>
-        check_command           check_templeton_status!<%=scope.function_hdp_template_var("::hdp::templeton_port")%>!v1!false
-        <%end-%>
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-<%end-%>
-
-<%if scope.function_hdp_nagios_members_exist('hue-server')-%>
-define service {
-        hostgroup_name          hue-server
-        use                     hadoop-service
-        service_description     HUE::Hue Server status
-        servicegroups           HUE
-        check_command           check_hue_status
-        normal_check_interval   100
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-<%end-%>
-
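The check_aggregate commands above ("Percent NodeManagers live", "Percent JournalNodes live") alert when the share of failed child checks crosses the warn/crit percentages. A hedged Python sketch of that aggregation; the real plugin reads Nagios state files, so the inputs here are illustrative:

    def aggregate_status(child_up, warn_pct, crit_pct):
        # child_up: list of booleans, True meaning the child check passed.
        # Thresholds are percentages of failed children, as in
        # check_aggregate!"NODEMANAGER::NodeManager process"!10%!30%.
        total = len(child_up)
        if total == 0:
            return 'OK'
        failed_pct = 100.0 * child_up.count(False) / total
        if failed_pct >= crit_pct:
            return 'CRITICAL'
        if failed_pct >= warn_pct:
            return 'WARNING'
        return 'OK'

    print(aggregate_status([True, True, False, True], 10, 30))  # WARNING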


[10/15] AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/service.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/service.pp b/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/service.pp
deleted file mode 100644
index ed7731c..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/service.pp
+++ /dev/null
@@ -1,129 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::service(
-  $ensure,
-  $service_type
-)
-{
-  include $hdp-hive::params
-  
-  $hive_user = $hdp-hive::params::hive_user
-  $hadoop_home = $hdp::params::hadoop_home
-  $hive_pid_dir = $hdp-hive::params::hive_pid_dir
-  $hive_pid = $hdp-hive::params::hive_pid
-  $hive_log_dir = $hdp-hive::params::hive_log_dir
-  $start_hiveserver2_script = $hdp-hive::params::start_hiveserver2_script
-  $start_metastore_script = $hdp-hive::params::start_metastore_script
-  $hive_var_lib = $hdp-hive::params::hive_var_lib
-  $hive_server_conf_dir = $hdp-hive::params::hive_server_conf_dir
-
-  $start_hiveserver2_path = "/tmp/$start_hiveserver2_script"
-  $start_metastore_path = "/tmp/$start_metastore_script"
-
-  if ($service_type == 'metastore') {
-    $pid_file = "$hive_pid_dir/hive.pid" 
-    $cmd = "env HADOOP_HOME=${hadoop_home} JAVA_HOME=$hdp::params::java64_home $start_metastore_path ${hive_log_dir}/hive.out ${hive_log_dir}/hive.log $pid_file $hdp-hive::params::hive_server_conf_dir"
-    
-  } elsif ($service_type == 'hiveserver2') {
-    $pid_file = "$hive_pid_dir/$hive_pid" 
-    $cmd = "env JAVA_HOME=$hdp::params::java64_home $start_hiveserver2_path ${hive_log_dir}/hive-server2.out  ${hive_log_dir}/hive-server2.log $pid_file ${hive_server_conf_dir}"
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_type}")
-  }
-
-
-  $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-
-  if ($ensure == 'running') {
-    $daemon_cmd = "su - ${hive_user} -c  '${cmd} '"
-  } elsif ($ensure == 'stopped') {
-    $daemon_cmd = "kill `cat $pid_file` >/dev/null 2>&1 && rm -f ${pid_file}"
-  } else {
-    $daemon_cmd = undef
-  }
-
-  hdp-hive::service::directory { $hive_pid_dir : }
-  hdp-hive::service::directory { $hive_log_dir : }
-  hdp-hive::service::directory { $hive_var_lib : }
-
-  file { $start_hiveserver2_path:
-    ensure => present,
-    source => "puppet:///modules/hdp-hive/$start_hiveserver2_script",
-    mode => '0755',
-  }
-
-  file { $start_metastore_path:
-    ensure => present,
-    source => "puppet:///modules/hdp-hive/$start_metastore_script",
-    mode => '0755',
-  }
-
-  anchor{'hdp-hive::service::begin':} -> Hdp-hive::Service::Directory<||> -> anchor{'hdp-hive::service::end':}
-  
-  if ($daemon_cmd != undef) {
-    if ($ensure == 'running') {
-
-      $pid_file_state = 'present'
-      hdp::exec { $daemon_cmd:
-        command => $daemon_cmd,
-        unless  => $no_op_test
-      }
-    } elsif ($ensure == 'stopped') {
-      $pid_file_state = 'absent'
-      hdp::exec { $daemon_cmd:
-        command => $daemon_cmd,
-        onlyif  => $no_op_test
-      }
-    }
-
-    file { $pid_file:
-      ensure => $pid_file_state
-    }
-
-    if ($ensure == 'running' and ($hive_jdbc_driver == "com.mysql.jdbc.Driver" or $hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver")) {
-      $db_connection_check_command = "${hdp::params::java64_home}/bin/java -cp ${hdp::params::check_db_connection_jar}:/usr/share/java/${hdp-hive::params::jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification ${hdp-hive::params::hive_jdbc_connection_url} ${hdp-hive::params::hive_metastore_user_name} ${hdp-hive::params::hive_metastore_user_passwd} ${hdp-hive::params::hive_jdbc_driver}"
-    } else {
-      $db_connection_check_command = undef
-    }
-
-    if ($db_connection_check_command != undef) {
-      hdp::exec { "DB connection check $db_connection_check_command" :
-        command => $db_connection_check_command,
-        path    => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-      }
-
-      Hdp-hive::Service::Directory<||> -> Hdp::Exec["DB connection check $db_connection_check_command"] -> File[ $start_metastore_path]-> File[ $start_hiveserver2_path]-> Hdp::Exec[$daemon_cmd] -> File[$pid_file] -> Anchor['hdp-hive::service::end']
-    } else {
-      Hdp-hive::Service::Directory<||> -> File[ $start_metastore_path]-> File[ $start_hiveserver2_path]-> Hdp::Exec[$daemon_cmd] -> File[$pid_file] -> Anchor['hdp-hive::service::end']
-    }
-  }
-}
-
-define hdp-hive::service::directory()
-{
-  hdp::directory_recursive_create { $name: 
-    owner => $hdp-hive::params::hive_user,
-    mode => '0755',
-    service_state => $::ensure,
-    force => true
-  }
-}
-
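The $no_op_test above is the liveness guard for the start/stop execs: the pid file must exist and the pid it records must belong to a running process. A rough Python equivalent of that shell test (the function name is invented for the sketch):

    import os

    def process_running(pid_file):
        # Mirrors: ls pid_file && ps `cat pid_file`
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
        except (IOError, ValueError):
            return False
        try:
            os.kill(pid, 0)  # signal 0 checks existence without killing
        except OSError:
            return False
        return True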

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/templates/hive-env.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/templates/hive-env.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-hive/templates/hive-env.sh.erb
deleted file mode 100644
index 80d84f4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/templates/hive-env.sh.erb
+++ /dev/null
@@ -1,55 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hive and Hadoop environment variables here. These variables can be used
-# to control the execution of Hive. It should be used by admins to configure
-# the Hive installation (so that users do not have to set environment variables
-# or set command line parameters to get correct behavior).
-#
-# The hive service being invoked (CLI/HWI etc.) is available via the environment
-# variable SERVICE
-
-# Hive Client memory usage can be an issue if a large number of clients
-# are running at the same time. The flags below have been useful in
-# reducing memory usage:
-#
- if [ "$SERVICE" = "cli" ]; then
-   if [ -z "$DEBUG" ]; then
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-   else
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-   fi
- fi
-
-# The heap size of the JVM started by the hive shell script can be controlled via:
-
-export HADOOP_HEAPSIZE="<%=scope.function_hdp_template_var("::hadoop_heapsize")%>"
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-
-# Larger heap size may be required when running queries over large number of files or partitions.
-# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-# appropriate for hive server (hwi etc).
-
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp::params::hadoop_home")%>}
-
-# Hive Configuration Directory can be controlled by:
-export HIVE_CONF_DIR=<%=scope.function_hdp_template_var("::hdp-hive::hive_config_dir")%>
-
-# Folder containing extra libraries required for hive compilation/execution can be controlled by:
-# export HIVE_AUX_JARS_PATH=
-export HIVE_AUX_JARS_PATH=<%=scope.function_hdp_template_var("hive_aux_jars_path")%>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/hue/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/hue/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/hue/service_check.pp
deleted file mode 100644
index cfee202..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/hue/service_check.pp
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hue::hue::service_check() inherits hdp-hue::params
-{
-  $status_check_cmd = "/etc/init.d/hue status | grep 'is running'"
-  $smoke_test_cmd = "${hue_home_dir}/build/env/bin/hue smoke_test"
-
-  anchor { 'hdp-hue::hue::service_check::begin' : }
-
-  exec { 'hue-status-check':
-    command   => $status_check_cmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-
-  exec { 'hue-smoke-test':
-      command   => $smoke_test_cmd,
-      tries     => 3,
-      try_sleep => 5,
-      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-      require   => Exec['hue-status-check'],
-      before    => Anchor['hdp-hue::hue::service_check::end'],
-      logoutput => "true"
-    }
-
-  anchor { 'hdp-hue::hue::service_check::end' : }
-}
\ No newline at end of file
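
Both execs above retry their command three times with a five-second pause (tries/try_sleep). A small Python sketch of that retry loop; the helper name is invented, the command string is the one from the manifest:

    import subprocess
    import time

    def check_with_retries(cmd, tries=3, try_sleep=5):
        # Run cmd through the shell, retrying like Puppet's exec with
        # tries/try_sleep; True on the first zero exit status.
        for attempt in range(tries):
            if subprocess.call(cmd, shell=True) == 0:
                return True
            if attempt < tries - 1:
                time.sleep(try_sleep)
        return False

    check_with_retries("/etc/init.d/hue status | grep 'is running'")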

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/init.pp
deleted file mode 100644
index 876f76c..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/init.pp
+++ /dev/null
@@ -1,83 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hue(
-  $service_state = undef
-)
-{
-  include $hdp-hue::params
-
-  $hue_user = $hdp-hue::params::hue_server_user
-  $hue_conf_dir = $hdp::params::hue_conf_dir
-
-  if ($service_state == 'uninstalled') {
-
-    hdp::package { 'hue-server':
-      ensure => 'uninstalled'
-    }
-
-    hdp::directory { $hue_conf_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-  } else {
-    ## Install package
-    hdp::package { 'hue-server': }
-
-    ## Create user
-    hdp::user{ 'hue_user':
-      user_name => $hue_user
-    }
-
-    ## Create dir
-    hdp::directory_recursive_create { $hue_conf_dir:
-      service_state => $service_state,
-      force => true,
-      owner => $hue_user
-    }
-
-    # Configs generation
-    if has_key($configuration, 'hue-site') {
-      hdp-hue::generate_config_file { 'hue-ini':
-        config_file_path => $hdp-hue::params::hue_conf_file
-      }
-    }
-
-    anchor { 'hdp-hue::begin': } -> Hdp::Package['hue-server'] ->  Hdp::User['hue_user'] -> Hdp::Directory_recursive_create[$hue_conf_dir] -> Hdp-Hue::Generate_config_file<||> -> anchor { 'hdp-hue::end': }
-
-  }
-}
-
-define hdp-hue::generate_config_file(
-  $config_file_path
-)
-{
-  if (hdp_is_empty($configuration) == false and
-    hdp_is_empty($configuration['hue-site']) == false)
-  {
-    ## Create hue.ini file
-    file { $config_file_path :
-      ensure => file,
-      content => template('hdp-hue/hue-ini.cfg.erb'),
-      owner => $hdp-hue::params::hue_server_user
-    }
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/params.pp
deleted file mode 100644
index 533501d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/params.pp
+++ /dev/null
@@ -1,108 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-hue::params() inherits hdp::params {
-
-  ## Global configuration properties
-
-  $hue_conf_file = "${hdp::params::hue_conf_dir}/hue.ini"
-  $hue_pid_dir = hdp_default("hue_pid_dir", "/var/run/hue")
-  $hue_log_dir = hdp_default("hue_log_dir", "/var/log/hue")
-  $hue_lock_file = hdp_default("hue_lock_file", "/var/lock/subsys/hue")
-  $hue_server_user = hdp_default("hue_user", "hue")
-  $hue_server_group = hdp_default("hue_user_group", "hadoop")
-  $hue_home_dir = hdp_default("hue_home_dir", "/usr/lib/hue")
-
-  # Other properties - not exposed
-
-  $hue_hadoop_home = $hdp::params::hadoop_lib_home
-  $hue_hadoop_mapred_home = $hue_hadoop_home
-  $security_enabled = $hdp::params::security_enabled
-  $hue_hive_conf_dir = $hdp::params::hive_conf_dir
-  $hue_pig_java_home = $hdp::params::java64_home
-  $webhcat_server_host = hdp_default("webhcat_server_host")
-
-  # All non-global properties
-
-  if has_key($configuration, 'hue-site') {
-    $hue-site = $configuration['hue-site']
-
-    # Hadoop Configuration properties
-
-    $hue_hadoop_fs_defaultfs = hdp_get_value_from_map($hue-site, "fs_defaultfs", "")
-    $hue_hadoop_webhdfs_url = hdp_get_value_from_map($hue-site, "webhdfs_url", "")
-    $hue_hadoop_jt_host = hdp_get_value_from_map($hue-site, "jobtracker_host", hdp_default("jtnode_host"))
-    $hue_hadoop_jt_port = hdp_get_value_from_map($hue-site, "jobtracker_port", "50030")
-    $hue_hive_home_dir = hdp_get_value_from_map($hue-site, "hive_home_dir", "/usr/lib/hive")
-    $hue_templeton_url = hdp_get_value_from_map($hue-site, "templeton_url", "http://${webhcat_server_host}:50111/templeton/v1")
-
-    # Database Configuration properties
-
-    $hue_db_engine = hdp_get_value_from_map($hue-site, "db_engine", "")
-    $hue_db_port = hdp_get_value_from_map($hue-site, "db_port", "")
-    $hue_db_host = hdp_get_value_from_map($hue-site, "db_host", "")
-    $hue_db_user = hdp_get_value_from_map($hue-site, "db_user", "")
-    $hue_db_password = hdp_get_value_from_map($hue-site, "db_password", "")
-    $hue_db_name = hdp_get_value_from_map($hue-site, "db_name", "")
-
-    # Hue Email Configuration properties
-
-    $hue_smtp_host = hdp_get_value_from_map($hue-site, "smtp_host", "")
-    $hue_smtp_port = hdp_get_value_from_map($hue-site, "smtp_port", "")
-    $hue_smtp_user = hdp_get_value_from_map($hue-site, "smtp_user", "")
-    $hue_smtp_password = hdp_get_value_from_map($hue-site, "smtp_password", "")
-    $hue_smtp_tls = hdp_get_value_from_map($hue-site, "tls", "no")
-    $hue_default_from_email = hdp_get_value_from_map($hue-site, "default_from_email", "hueadmin@sandbox.com")
-
-    # Hue Configuration properties
-
-    $hue_debug_messages = hdp_get_value_from_map($hue-site, "send_debug_messages", "1")
-    $hue_database_logging = hdp_get_value_from_map($hue-site, "database_logging", "0")
-    $hue_secret_key = hdp_get_value_from_map($hue-site, "secret_key", "ThisisusedforsecurehashinginthesessionstoreSetthistoarandomstringthelongerthebetter")
-    $hue_http_host = hdp_get_value_from_map($hue-site, "http_host", "0.0.0.0")
-    $hue_http_port = hdp_get_value_from_map($hue-site, "http_port", "8000")
-    $hue_time_zone = hdp_get_value_from_map($hue-site, "time_zone", "America/Los_Angeles")
-    $hue_django_debug_mode = hdp_get_value_from_map($hue-site, "django_debug_mode", "1")
-    $hue_use_cherrypy_server = hdp_get_value_from_map($hue-site, "use_cherrypy_server", "false")
-    $hue_http_500_debug_mode = hdp_get_value_from_map($hue-site, "http_500_debug_mode", "1")
-    $hue_backend_auth_policy = hdp_get_value_from_map($hue-site, "backend_auth", "desktop.auth.backend.AllowAllBackend")
-
-    $hue_hadoop_yarn_host = hdp_get_value_from_map($hue-site, "resourcemanager_host", "")
-    $hue_hadoop_yarn_port = hdp_get_value_from_map($hue-site, "resourcemanager_port", "")
-
-    # Shell Configuration properties
-
-    $hue_pig_shell_command = hdp_get_value_from_map($hue-site, "pig_shell_command", "/usr/bin/pig -l /dev/null")
-    $hue_hbase_nice_name = hdp_get_value_from_map($hue-site, "hbase_nice_name", "HBase Shell")
-    $hue_hbase_shell_command = hdp_get_value_from_map($hue-site, "hbase_shell_command", "/usr/bin/hbase shell")
-    $hue_bash_nice_name = hdp_get_value_from_map($hue-site, "bash_nice_name", "Bash (Test only!!!)")
-    $hue_bash_shell_command = hdp_get_value_from_map($hue-site, "bash_shell_command", "/bin/bash")
-
-    $hue_whitelist = hdp_get_value_from_map($hue-site, "whitelist", "(localhost|127\\.0\\.0\\.1):(${jtnode_port}|${namenode_port}|${tasktracker_port}|${datanode_port}|${jobhistory_port})")
-
-    # Security Configuration properties
-
-    $hue_keytab_path = hdp_get_value_from_map($hue-site, "hue_keytab", "${keytab_path}/hue.service.keytab")
-    $hue_principal = hdp_get_value_from_map($hue-site, "hue_principal", "hue/_HOST@${kerberos_domain}")
-
-  }
-
-}
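
Most of this params class is the same pattern repeated: hdp_get_value_from_map takes the value from the hue-site map when present, otherwise a supplied default. In Python the equivalent is a dict lookup with a fallback; the map contents below are illustrative:

    # Stand-in for $configuration['hue-site'].
    hue_site = {'http_port': '8000'}

    # Equivalent of hdp_get_value_from_map($hue-site, key, default).
    http_host = hue_site.get('http_host', '0.0.0.0')
    http_port = hue_site.get('http_port', '8000')
    time_zone = hue_site.get('time_zone', 'America/Los_Angeles')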

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/server.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/server.pp b/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/server.pp
deleted file mode 100644
index bcbb150..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/server.pp
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hue::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $setup = false,
-  $opts = {}
-) inherits  hdp-hue::params
-{
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-    $hdp::params::service_exists['hdp-hue::server'] = true
-
-    #installs package, creates user, sets configuration
-    class{ 'hdp-hue' :
-      service_state => $service_state,
-    }
-
-    Hdp-hue::Generate_config_file<||>{ config_file_path => $hdp-hue::params::hue_conf_file }
-
-    class { 'hdp-hue::service' :
-      ensure => $service_state
-    }
-
-    #top level does not need anchors
-    Class['hdp-hue'] -> Class['hdp-hue::service']
-    } else {
-      hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-    }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/service.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/service.pp b/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/service.pp
deleted file mode 100644
index 179337e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hue/manifests/service.pp
+++ /dev/null
@@ -1,76 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hue::service(
-  $ensure,
-  $initial_wait = undef
-)
-{
-  include $hdp-hue::params
-
-  $hue_user = $hdp-hue::params::hue_server_user
-  $hue_start_cmd = "/etc/init.d/hue start --USER=${hue_user} --LOGDIR=${hue_log_dir} --LOCKFILE=${hue_lock_file} --PIDFILE=${hue_pid_dir}/supervisor.pid"
-  $hue_stop_cmd = "/etc/init.d/hue stop"
-
-  $pid_dir = $hdp-hue::params::hue_pid_dir
-  $log_dir = $hdp-hue::params::hue_log_dir
-  $pid_file = "${pid_dir}/supervisor.pid"
-  $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-
-  if ($ensure == 'running') {
-    $daemon_cmd = $hue_start_cmd
-  } elsif ($ensure == 'stopped') {
-    $daemon_cmd = $hue_stop_cmd
-  } else {
-    $daemon_cmd = undef
-  }
-
-  hdp-hue::service::directory { $pid_dir :
-    service_state => $ensure,
-  }
-
-  hdp-hue::service::directory { $log_dir :
-    service_state => $ensure,
-  }
-
-  anchor {'hdp-hue::service::begin': } -> Hdp-hue::Service::Directory<||> -> anchor {'hdp-hue::service::end': }
-
-  if ($daemon_cmd != undef) {
-    hdp::exec { $daemon_cmd:
-      command => $daemon_cmd,
-      unless  => $no_op_test,
-      initial_wait => $initial_wait
-    }
-    Hdp-hue::Service::Directory<||> -> Hdp::Exec[$daemon_cmd] -> Anchor['hdp-hue::service::end']
-  }
-
-}
-
-define hdp-hue::service::directory(
- $service_state
-)
-{
-  hdp::directory_recursive_create { $name:
-    owner => $hdp-hue::params::hue_server_user,
-    mode => '0755',
-    service_state => $service_state,
-    force => true
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hue/templates/hue-ini.cfg.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hue/templates/hue-ini.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-hue/templates/hue-ini.cfg.erb
deleted file mode 100644
index a263371..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hue/templates/hue-ini.cfg.erb
+++ /dev/null
@@ -1,496 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Hue configuration file
-# ===================================
-#
-# For complete documentation about the contents of this file, run
-#       $ <hue_root>/build/env/bin/hue config_help
-#
-# All .ini files under the current directory are treated equally.  Their
-# contents are merged to form the Hue configuration, which can
-# can be viewed on the Hue at
-#       http://<hue_host>:<port>/dump_config
-
-
-###########################################################################
-# General configuration for core Desktop features (authentication, etc)
-###########################################################################
-
-[desktop]
-
-  send_dbug_messages=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_debug_messages")%>
-
-  # To show database transactions, set database_logging to 1
-  database_logging=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_database_logging")%>
-
-  # Set this to a random string, the longer the better.
-  # This is used for secure hashing in the session store.
-  secret_key=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_secret_key")%>
-
-  # Webserver listens on this address and port
-  http_host=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_http_host")%>
-  http_port=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_http_port")%>
-
-  # Time zone name
-  time_zone=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_time_zone")%>
-
-  # Turn off debug
-  django_debug_mode=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_django_debug_mode")%>
-
-  # Turn off backtrace for server error
-  http_500_debug_mode=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_http_500_debug_mode")%>
-
-  # Server email for internal error messages
-  ## django_server_email='hue@localhost.localdomain'
-
-  # Email backend
-  ## django_email_backend=django.core.mail.backends.smtp.EmailBackend
-
-  # Set to true to use CherryPy as the webserver, set to false
-  # to use Spawning as the webserver. Defaults to Spawning if
-  # key is not specified.
-  use_cherrypy_server = <%=scope.function_hdp_template_var("::hdp-hue::params::hue_use_cherrypy_server")%>
-
-  # Webserver runs as this user
-  server_user=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_server_user")%>
-  server_group=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_server_group")%>
-
-  # If set to false, runcpserver will not actually start the web server.
-  # Used if Apache is being used as a WSGI container.
-  ## enable_server=yes
-
-  # Number of threads used by the CherryPy web server
-  #cherrypy_server_threads=2
-
-  # Filename of SSL Certificate
-  ## ssl_certificate=
-
-  # Filename of SSL RSA Private Key
-  ## ssl_private_key=
-
-  # Default encoding for site data
-  ## default_site_encoding=utf-8
-
-  # Administrators
-  # ----------------
-  [[django_admins]]
-    ## [[[admin1]]]
-    ## name=john
-    ## email=john@doe.com
-
-  # UI customizations
-  # -------------------
-  [[custom]]
-
-  # Top banner HTML code
-  ## banner_top_html=
-
-  # Configuration options for user authentication into the web application
-  # ------------------------------------------------------------------------
-  [[auth]]
-
-    # Authentication backend. Common settings are:
-    # - django.contrib.auth.backends.ModelBackend (entirely Django backend)
-    # - desktop.auth.backend.AllowAllBackend (allows everyone)
-    # - desktop.auth.backend.AllowFirstUserDjangoBackend
-    #     (Default. Relies on Django and user manager, after the first login)
-    # - desktop.auth.backend.LdapBackend
-    # - desktop.auth.backend.PamBackend
-    backend=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_backend_auth_policy")%>
-
-    ## pam_service=login
-
-  # Configuration options for connecting to LDAP and Active Directory
-  # -------------------------------------------------------------------
-  [[ldap]]
-
-  # The search base for finding users and groups
-  ## base_dn="DC=mycompany,DC=com"
-
-  # The NT domain to connect to (only for use with Active Directory)
-  ## nt_domain=mycompany.com
-
-  # URL of the LDAP server
-  ## ldap_url=ldap://auth.mycompany.com
-
-  # Path to certificate for authentication over TLS
-  ## ldap_cert=
-
-  # Distinguished name of the user to bind as -- not necessary if the LDAP server
-  # supports anonymous searches
-  ## bind_dn="CN=ServiceAccount,DC=mycompany,DC=com"
-
-  # Password of the bind user -- not necessary if the LDAP server supports
-  # anonymous searches
-  ## bind_password=
-
-  # Pattern for searching for usernames -- Use <username> for the parameter
-  # For use when using LdapBackend for Hue authentication
-  ## ldap_username_pattern="uid=<username>,ou=People,dc=mycompany,dc=com"
-
-      [[[users]]]
-
-      # Base filter for searching for users
-      ## user_filter="objectclass=*"
-
-      # The username attribute in the LDAP schema
-      ## user_name_attr=sAMAccountName
-
-      [[[groups]]]
-
-      # Base filter for searching for groups
-      ## group_filter="objectclass=*"
-
-      # The username attribute in the LDAP schema
-      ## group_name_attr=cn
-
-  # Configuration options for specifying the Desktop Database.  For more info,
-  # see http://docs.djangoproject.com/en/1.1/ref/settings/#database-engine
-  # ------------------------------------------------------------------------
-  [[database]]
-    # Database engine is typically one of:
-    # postgresql, mysql, sqlite3, or oracle
-    #
-    # Note that for sqlite3, 'name', below is a filename;
-    # for other backends, it is the database name.
-  <% if scope.function_hdp_template_var("::hdp-hue::params::hue_db_engine") != "" %>
-    engine=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_db_engine")%>
-    host=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_db_host")%>
-    port=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_db_port")%>
-    user=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_db_user")%>
-    password=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_db_password")%>
-    name=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_db_name")%>
-  <% end %>
-
-
-  # Configuration options for connecting to an external SMTP server
-  # ------------------------------------------------------------------------
-  [[smtp]]
-
-  <% if scope.function_hdp_template_var("::hdp-hue::params::hue_smtp_host") != "" %>
-    # The SMTP server information for email notification delivery
-    host=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_smtp_host")%>
-    port=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_smtp_port")%>
-    user=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_smtp_user")%>
-    password=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_smtp_password")%>
-  <% end %>
-
-    # Whether to use a TLS (secure) connection when talking to the SMTP server
-    tls=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_smtp_tls")%>
-
-    # Default email address to use for various automated notification from Hue
-    default_from_email=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_default_from_email")%>
-
-
-  # Configuration options for Kerberos integration for secured Hadoop clusters
-  # ------------------------------------------------------------------------
-  [[kerberos]]
-
-  <% if scope.function_hdp_template_var("::hdp::params::security_enabled") == true %>
-    # Path to Hue's Kerberos keytab file
-    hue_keytab=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_keytab_path")%>
-    # Kerberos principal name for Hue
-    hue_principal=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_principal")%>
-    # Path to kinit
-    kinit_path=<%=scope.function_hdp_template_var("::hdp::params::kinit_path_local")%>
-  <% end %>
-
-###########################################################################
-# Settings to configure your Hadoop cluster.
-###########################################################################
-
-[hadoop]
-
-  # Configuration for HDFS NameNode
-  # ------------------------------------------------------------------------
-  [[hdfs_clusters]]
-
-    [[[default]]]
-      # Enter the filesystem uri
-      fs_defaultfs=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_hadoop_fs_defaultfs")%>
-
-      # Use WebHdfs/HttpFs as the communication mechanism. To fallback to
-      # using the Thrift plugin (used in Hue 1.x), this must be uncommented
-      # and explicitly set to the empty value.
-      webhdfs_url=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_hadoop_webhdfs_url")%>
-
-      security_enabled=<%=scope.function_hdp_template_var("::hdp-hue::params::security_enabled")%>
-
-      # Settings about this HDFS cluster. If you install HDFS in a
-      # different location, you need to set the following.
-
-      # Defaults to $HADOOP_HDFS_HOME or /usr/lib/hadoop-hdfs
-      hadoop_hdfs_home=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_hadoop_home")%>
-
-      # Defaults to $HADOOP_BIN or /usr/bin/hadoop
-      ## hadoop_bin=/usr/bin/hadoop
-
-      # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
-      ## hadoop_conf_dir=/etc/hadoop/conf
-
-  # Configuration for MapReduce JobTracker
-  # ------------------------------------------------------------------------
-  [[mapred_clusters]]
-
-    [[[default]]]
-      # Enter the host on which you are running the Hadoop JobTracker
-      jobtracker_host=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_hadoop_jt_host")%>
-      # The port where the JobTracker IPC listens on
-      jobtracker_port=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_hadoop_jt_port")%>
-      # Thrift plug-in port for the JobTracker
-      ## thrift_port=9290
-      # Whether to submit jobs to this cluster
-      ## submit_to=False
-
-      ## security_enabled=false
-
-      # Settings about this MR1 cluster. If you install MR1 in a
-      # different location, you need to set the following.
-
-      # Defaults to $HADOOP_MR1_HOME or /usr/lib/hadoop-0.20-mapreduce
-      hadoop_mapred_home=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_hadoop_mapred_home")%>
-
-      # Defaults to $HADOOP_BIN or /usr/bin/hadoop
-      ## hadoop_bin=/usr/bin/hadoop
-
-      # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
-      ## hadoop_conf_dir=/etc/hadoop/conf
-
-  # Configuration for Yarn
-  # ------------------------------------------------------------------------
-  [[yarn_clusters]]
-
-    [[[default]]]
-      # Enter the host on which you are running the ResourceManager
-      resourcemanager_host=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_hadoop_yarn_host")%>
-      # The port where the ResourceManager IPC listens on
-      resourcemanager_port=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_hadoop_yarn_port")%>
-      # Whether to submit jobs to this cluster
-      ## submit_to=False
-
-      ## security_enabled=false
-
-      # Settings about this MR2 cluster. If you install MR2 in a
-      # different location, you need to set the following.
-
-      # Defaults to $HADOOP_MR2_HOME or /usr/lib/hadoop-mapreduce
-      hadoop_mapred_home=/usr/lib/hadoop/lib
-
-      # Defaults to $HADOOP_BIN or /usr/bin/hadoop
-      ## hadoop_bin=/usr/bin/hadoop
-
-      # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
-      ## hadoop_conf_dir=/etc/hadoop/conf
-
-
-###########################################################################
-# Settings to configure liboozie
-###########################################################################
-
-[liboozie]
-  # The URL where the Oozie service runs on. This is required in order for
-  # users to submit jobs.
-  ## oozie_url=http://localhost:11000/oozie
-
-  ## security_enabled=false
-
-  # Location on HDFS where the workflows/coordinator are deployed when submitted.
-  ## remote_deployement_dir=/user/hue/oozie/deployments
-
-
-###########################################################################
-# Settings to configure the Oozie app
-###########################################################################
-
-[oozie]
-  # Location on local FS where the examples are stored.
-  ## local_data_dir=..../examples
-
-  # Location on local FS where the data for the examples is stored.
-  ## sample_data_dir=...thirdparty/sample_data
-
-  # Location on HDFS where the oozie examples and workflows are stored.
-  ## remote_data_dir=/user/hue/oozie/workspaces
-
-  # Share workflows and coordinators information with all users. If set to false,
-  # they will be visible only to the owner and administrators.
-  ## share_jobs=True
-
-  # Maximum number of Oozie workflows or coordinators to retrieve in one API call.
-  ## oozie_jobs_count=100
-
-
-###########################################################################
-# Settings to configure Beeswax
-###########################################################################
-
-[beeswax]
-
-  # Deprecated! Will be removed in Hue 3
-  # Multiple sections are now available in query_servers
-  # Host where Beeswax internal metastore Thrift daemon is running
-  ## beeswax_meta_server_host=localhost
-
-  # Deprecated! Will be removed in Hue 3
-  # Multiple sections are now available in query_servers
-  # Configure the port the internal metastore daemon runs on. Used only if
-  # hive.metastore.local is true.
-  ## beeswax_meta_server_port=8003
-
-  # Host where Beeswax internal metastore Thrift daemon is running
-  ## beeswax_meta_server_host=localhost
-
-  # Configure the port the internal metastore daemon runs on. Used only if
-  # hive.metastore.local is true.
-  ## beeswax_meta_server_port=8003
-
-  # Hive home directory
-  hive_home_dir=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_hive_home_dir")%>
-
-  # Hive configuration directory, where hive-site.xml is located
-  hive_conf_dir=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_hive_conf_dir")%>
-
-  # Timeout in seconds for thrift calls to beeswax service
-  ## beeswax_server_conn_timeout=120
-
-  # Timeout in seconds for thrift calls to the hive metastore
-  ## metastore_conn_timeout=10
-
-  # Maximum Java heapsize (in megabytes) used by Beeswax Server.
-  # Note that the setting of HADOOP_HEAPSIZE in $HADOOP_CONF_DIR/hadoop-env.sh
-  # may override this setting.
-  ## beeswax_server_heapsize=1000
-
-  # Share saved queries with all users. If set to false, saved queries are
-  # visible only to the owner and administrators.
-  ## share_saved_queries=true
-
- # One entry for each Query Server that can execute some SQL queries.
- # This must be at the bottom of the [beeswax] section.
- [[query_servers]]
-
-   [[[default]]]
-   # Host where the Query Server Thrift daemon is running
-   ## server_host=localhost
-   # Configure the port the Query Server Thrift server
-   ## server_port=8002
-   # If DDL queries are supported (e.g. DROP can be sent directly to this server)
-   ## support_ddl=True
-
-###########################################################################
-# Settings to configure Job Designer
-###########################################################################
-
-[jobsub]
-  # Location on HDFS where the jobsub examples and templates are stored.
-  ## remote_data_dir=/user/hue/jobsub
-
-  # Location on local FS where examples and template are stored.
-  ## local_data_dir=..../data
-
-  # Location on local FS where sample data is stored
-  ## sample_data_dir=...thirdparty/sample_data
-
-
-###########################################################################
-# Settings to configure Job Browser
-###########################################################################
-
-[jobbrowser]
-  # Share submitted jobs information with all users. If set to false,
-  # submitted jobs are visible only to the owner and administrators.
-  ## share_jobs=true
-
-
-###########################################################################
-# Settings to configure the Shell application
-###########################################################################
-
-[shell]
-  # The shell_buffer_amount specifies the number of bytes of output per shell
-  # that the Shell app will keep in memory. If not specified, it defaults to
-  # 524288 (512 KiB).
-  ## shell_buffer_amount=100
-
-  # If you run Hue against a Hadoop cluster with Kerberos security enabled, the
-  # Shell app needs to acquire delegation tokens for the subprocesses to work
-  # correctly. These delegation tokens are stored as temporary files in some
-  # directory. You can configure this directory here. If not specified, it
-  # defaults to /tmp/hue_delegation_tokens.
-  ## shell_delegation_token_dir=/tmp/hue_delegation_tokens
-
-  [[ shelltypes ]]
-
-    # Define and configure a new shell type "flume"
-    # ------------------------------------------------------------------------
-    #[[[ flume ]]]
-    #  nice_name = "Flume Shell"
-    # command = "/usr/bin/flume shell"
-    # help = "The command-line Flume client interface."
-
-    #  [[[[ environment ]]]]
-        # You can specify environment variables for the Flume shell
-        # in this section.
-
-    # Define and configure a new shell type "pig"
-    # ------------------------------------------------------------------------
-    [[[ pig ]]]
-      nice_name = "Pig Shell (Grunt)"
-      command = <%=scope.function_hdp_template_var("::hdp-hue::params::hue_pig_shell_command")%>
-      help = "The command-line interpreter for Pig"
-
-      [[[[ environment ]]]]
-        # You can specify environment variables for the Pig shell
-        # in this section. Note that JAVA_HOME must be configured
-        # for the Pig shell to run.
-
-        [[[[[ JAVA_HOME ]]]]]
-          value = <%=scope.function_hdp_template_var("::hdp-hue::params::hue_pig_java_home")%>
-
-    # Define and configure a new shell type "hbase"
-    # ------------------------------------------------------------------------
-    [[[ hbase ]]]
-      nice_name = "HBase Shell"
-      command = <%=scope.function_hdp_template_var("::hdp-hue::params::hue_hbase_shell_command")%>
-      help = "The command-line HBase client interface."
-
-      [[[[ environment ]]]]
-        # You can configure environment variables for the HBase shell
-        # in this section.
-
-    # Define and configure a new shell type "bash" for testing only
-    # ------------------------------------------------------------------------
-    [[[ bash ]]]
-      nice_name = "Bash (Test only!!!)"
-      command = <%=scope.function_hdp_template_var("::hdp-hue::params::hue_bash_shell_command")%>
-      help = "A shell that does not depend on Hadoop components"
-
-
-###########################################################################
-# Settings for the User Admin application
-###########################################################################
-
-[useradmin]
-  # The name of the default user group that users will be a member of
-  ## default_user_group=default
-
-[hcatalog]
-  templeton_url=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_templeton_url")%>
-
-[proxy]
-whitelist=<%=scope.function_hdp_template_var("::hdp-hue::params::hue_whitelist")%>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/facter/kadm_keytab.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/facter/kadm_keytab.rb b/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/facter/kadm_keytab.rb
deleted file mode 100644
index 0b63bf9..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/facter/kadm_keytab.rb
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-require 'facter'
-Facter.add("kadm_keytab") do
-  setcode do
-     %x{[ -f /etc/kadm5.keytab ] && base64 </etc/kadm5.keytab 2>/dev/null} + "\n"
-  end
-end
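
For reference, the removed fact simply base64-encodes the KDC admin keytab so the
binary file can travel as a plain-text fact value. A minimal shell equivalent of
what the fact runs:

    [ -f /etc/kadm5.keytab ] && base64 </etc/kadm5.keytab 2>/dev/null

Decoding on the consuming side is the reverse: base64 -d >/etc/kadm5.keytab.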

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/puppet/parser/functions/kerberos_keytabs_input.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/puppet/parser/functions/kerberos_keytabs_input.rb b/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/puppet/parser/functions/kerberos_keytabs_input.rb
deleted file mode 100644
index 406cb2c..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/puppet/parser/functions/kerberos_keytabs_input.rb
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-module Puppet::Parser::Functions
-  newfunction(:kerberos_keytabs_input, :type => :rvalue) do |args|
-    fqdn,node_components,keytab_map = args 
-    ndx_ret = Hash.new
-    node_components.each do |cmp|
-      if info = keytab_map[cmp]
-        keytab = info["keytab"]
-        ndx_ret[keytab] ||= {"keytab" => keytab, "principals" => info["primaries"].map{|p|"#{p}/#{fqdn}"}}
-      end
-    end
-    ndx_ret.values
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/adminclient.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/adminclient.pp b/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/adminclient.pp
deleted file mode 100644
index 9176de3..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/adminclient.pp
+++ /dev/null
@@ -1,140 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-kerberos::adminclient(
-  $service_state = $hdp::params::cluster_service_state
-) inherits hdp-kerberos::params
-{
-  import 'hdp'
-
-  $kadmin_pw = "bla123"
-  $kadmin_admin = "kadmin/admin"
-  $realm = $kerberos_domain
-  $krb_realm = $kerberos_domain
-  $hdp::params::service_exists['hdp-kerberos::adminclient'] = true
-  $krbContext = {}
-  $krbContext['kadmin_pw'] = $kadmin_pw
-  $krbContext['kadmin_admin'] = $kadmin_admin
-  $krbContext['realm' ] = $kerberos_domain
-  $krbContext['local_or_remote'] = 'remote'
-  $krbContext['principals_to_create'] = $principals_to_create
-  $krbContext['keytabs_to_create'] = $keytabs_to_create
-  $krbContext['principals_in_keytabs'] = $principals_in_keytabs
-
-  $kdc_server = $kdc_host
-
-  package { $package_name_client:
-    ensure => installed,
-  }
-  if ($hdp::params::service_exists['hdp-kerberos::server'] != true) {
-    file { "/etc/krb5.conf":
-      content => template('hdp-kerberos/krb5.conf'),
-      owner => "root",
-      group => "root",
-      mode => "0644",
-      require => Package[$package_name_client],
-    }
-  }
- 
-  if ($create_principals_keytabs == "yes") {
-    notice("Creating principals and keytabs..")
-    hdp-kerberos::principals_and_keytabs::services { 'alphabeta': 
-      krb_context => $krbContext
-    }
-  }
-}
-
-
-define hdp-kerberos::principals_and_keytabs::services(
-  $krb_context
-)
-{
-  include hdp-kerberos::params
-  $principals_to_create = $krb_context[principals_to_create]
-  $keytabs_to_create = $krb_context[keytabs_to_create]
-
-  hdp-kerberos::principal {$principals_to_create:
-    krb_context => $krb_context,
-  }
-  
-  hdp-kerberos::keytab { $keytabs_to_create :
-    krb_context => $krb_context,
-    require => Hdp-kerberos::Principal[$principals_to_create]
-  }
-}
-
-define hdp-kerberos::keytab(
-  $krb_context,
-  $keytable_file_owner = undef,
-  $keytable_file_mode  = undef
-)
-{
-  include hdp-kerberos::params
-  $keytab = $name
-  $realm = $krb_context['realm']
-  $local_or_remote = $krb_context['local_or_remote']
-  $kadmin_pw = $krb_context['kadmin_pw']
-  $kadmin_admin = $krb_context['kadmin_admin']
-  $kadmin_cmd = "kadmin -w ${kadmin_pw} -p ${kadmin_admin}"
-  if ($local_or_remote == 'local') {
-    $kadmin_cmd = 'kadmin.local'
-  }
-  $principals_in_keytabs = $krb_context['principals_in_keytabs']
-
-  $principals = $principals_in_keytabs[$keytab]
-  $principals_list = inline_template("<%= principals.join(' ')%>")
-  $keytab_filename = $keytab
-
-  exec { "xst ${keytab}":
-    command => "rm -rf ${keytab_filename}; ${kadmin_cmd} -q 'xst -k ${keytab_filename} ${principals_list}'; chown puppet:apache ${keytab_filename}",
-    unless  => "klist -kt ${keytab_filename} 2>/dev/null | grep -q ' ${principals[0]}'", #TODO may make more robust test
-    path   => $hdp-kerberos::params::exec_path,
-  }
-
-  if (($keytable_file_owner != undef) or ($keytable_file_mode != undef)) {
-    file { $keytab_filename:
-      owner => $keytable_file_owner,
-      mode  => $keytable_file_mode,
-      require => Exec["xst ${keytab}"]
-    }
-  }
-}
-
-define hdp-kerberos::principal(
-  $krb_context
-)
-{
-  include hdp-kerberos::params
-  $realm = $krb_context['realm']
-  $local_or_remote = $krb_context['local_or_remote']
-  $kadmin_pw = $krb_context['kadmin_pw']
-  $kadmin_admin = $krb_context['kadmin_admin']
-  $kadmin_cmd =  "kadmin -w ${kadmin_pw} -p ${kadmin_admin}"
-  if ($local_or_remote == 'local') {
-    $kadmin_cmd = 'kadmin.local'
-  }
-  $principal = $name
-  exec { "addprinc ${principal}":
-    command => "${kadmin_cmd} -q 'addprinc -randkey ${principal}'",
-    unless => "${kadmin_cmd} -q listprincs | grep -q '^${principal}$'",
-    path => $hdp-kerberos::params::exec_path
-  }
-}
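
The principal and keytab defines above are thin wrappers around kadmin. A minimal
sketch of the underlying command sequence (KADMIN_PW, the principal name, and the
keytab file name below are illustrative placeholders):

    KADMIN="kadmin -w $KADMIN_PW -p kadmin/admin"
    $KADMIN -q 'addprinc -randkey nn/host1.example.com'         # create the principal with a random key
    $KADMIN -q 'xst -k nn.service.keytab nn/host1.example.com'  # export its keys to a keytab
    klist -kt nn.service.keytab                                 # verify the exported entries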

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/bigtop/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/bigtop/init.pp b/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/bigtop/init.pp
deleted file mode 100644
index 2b3e162..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/bigtop/init.pp
+++ /dev/null
@@ -1,217 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-class kerberos {
-  class site {
-    # The following is our interface to the world. This is what we allow
-    # users to tweak from the outside (see tests/init.pp for a complete
-    # example) before instantiating target classes.
-    # Once we migrate to Puppet 2.6 we can potentially start using 
-    # parametrized classes instead.
-    $domain     = $kerberos_domain     ? { '' => inline_template('<%= domain %>'),
-                                           default => $kerberos_domain }
-    $realm      = $kerberos_realm      ? { '' => inline_template('<%= domain.upcase %>'),
-                                           default => $kerberos_realm } 
-    $kdc_server = $kerberos_kdc_server ? { '' => 'localhost',
-                                           default => $kerberos_kdc_server }
-    $kdc_port   = $kerberos_kdc_port   ? { '' => '88', 
-                                           default => $kerberos_kdc_port } 
-    $admin_port = 749 /* BUG: linux daemon packaging doesn't let us tweak this */
-
-    $keytab_export_dir = "/var/lib/bigtop_keytabs"
-
-    case $operatingsystem {
-        'ubuntu': {
-            $package_name_kdc    = 'krb5-kdc'
-            $service_name_kdc    = 'krb5-kdc'
-            $package_name_admin  = 'krb5-admin-server'
-            $service_name_admin  = 'krb5-admin-server'
-            $package_name_client = 'krb5-user'
-            $exec_path           = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
-            $kdc_etc_path        = '/etc/krb5kdc/'
-        }
-        # default assumes CentOS, Redhat 5 series (just look at how random it all looks :-()
-        default: {
-            $package_name_kdc    = 'krb5-server'
-            $service_name_kdc    = 'krb5kdc'
-            $package_name_admin  = 'krb5-libs'
-            $service_name_admin  = 'kadmin'
-            $package_name_client = 'krb5-workstation'
-            $exec_path           = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/kerberos/sbin:/usr/kerberos/bin'
-            $kdc_etc_path        = '/var/kerberos/krb5kdc/'
-        }
-    }
-
-    file { "/etc/krb5.conf":
-      content => template('kerberos/krb5.conf'),
-      owner => "root",
-      group => "root",
-      mode => "0644",
-    }
-
-    @file { $keytab_export_dir:
-      ensure => directory,
-      owner  => "root",
-      group  => "root",
-    }
-
-    # Required for SPNEGO
-    @principal { "HTTP": 
-
-    }
-  }
-
-  class kdc inherits kerberos::site {
-    package { $package_name_kdc:
-      ensure => installed,
-    }
-
-    file { $kdc_etc_path:
-    	ensure => directory,
-        owner => root,
-        group => root,
-        mode => "0700",
-        require => Package["$package_name_kdc"],
-    }
-    file { "${kdc_etc_path}/kdc.conf":
-      content => template('kerberos/kdc.conf'),
-      require => Package["$package_name_kdc"],
-      owner => "root",
-      group => "root",
-      mode => "0644",
-    }
-    file { "${kdc_etc_path}/kadm5.acl":
-      content => template('kerberos/kadm5.acl'),
-      require => Package["$package_name_kdc"],
-      owner => "root",
-      group => "root",
-      mode => "0644",
-    }
-
-    exec { "kdb5_util":
-      path => $exec_path,
-      command => "rm -f /etc/kadm5.keytab ; kdb5_util -P cthulhu -r ${realm} create -s && kadmin.local -q 'cpw -pw secure kadmin/admin'",
-      
-      creates => "${kdc_etc_path}/stash",
-
-      subscribe => File["${kdc_etc_path}/kdc.conf"],
-      # refreshonly => true, 
-
-      require => [Package["$package_name_kdc"], File["${kdc_etc_path}/kdc.conf"], File["/etc/krb5.conf"]],
-    }
-
-    service { $service_name_kdc:
-      ensure => running,
-      require => [Package["$package_name_kdc"], File["${kdc_etc_path}/kdc.conf"], Exec["kdb5_util"]],
-      subscribe => File["${kdc_etc_path}/kdc.conf"],
-      hasrestart => true,
-    }
-
-
-    class admin_server inherits kerberos::kdc {
-      $se_hack = "setsebool -P kadmind_disable_trans  1 ; setsebool -P krb5kdc_disable_trans 1"
-
-      package { "$package_name_admin":
-        ensure => installed,
-        require => Package["$package_name_kdc"],
-      } 
-  
-      service { "$service_name_admin":
-        ensure => running,
-        require => [Package["$package_name_admin"], Service["$service_name_kdc"]],
-        hasrestart => true,
-        restart => "${se_hack} ; service ${service_name_admin} restart",
-        start => "${se_hack} ; service ${service_name_admin} start",
-      }
-    }
-  }
-
-  class client inherits kerberos::site {
-    package { $package_name_client:
-      ensure => installed,
-    }
-  }
-
-  class server {
-    include kerberos::client
-
-    class { "kerberos::kdc": } 
-    ->
-    Class["kerberos::client"] 
-
-    class { "kerberos::kdc::admin_server": }
-    -> 
-    Class["kerberos::client"]
-  }
-
-  define principal {
-    require "kerberos::client"
-
-    realize(File[$kerberos::site::keytab_export_dir])
-
-    $principal = "$title/$hdp::params::hostname"
-    $keytab    = "$kerberos::site::keytab_export_dir/$title.keytab"
-
-    exec { "addprinc.$title":
-      path => $kerberos::site::exec_path,
-      command => "kadmin -w secure -p kadmin/admin -q 'addprinc -randkey $principal'",
-      unless => "kadmin -w secure -p kadmin/admin -q listprincs | grep -q $principal",
-      require => Package[$kerberos::site::package_name_client],
-    } 
-    ->
-    exec { "xst.$title":
-      path    => $kerberos::site::exec_path, 
-      command => "kadmin -w secure -p kadmin/admin -q 'xst -k $keytab $principal'",
-      unless  => "klist -kt $keytab 2>/dev/null | grep -q $principal",
-      require => File[$kerberos::site::keytab_export_dir],
-    }
-  }
-
-  define host_keytab($princs = undef, $spnego = disabled) {
-    $keytab = "/etc/$title.keytab"
-
-    $requested_princs = $princs ? { 
-      undef   => [ $title ],
-      default => $princs,
-    }
-
-    $internal_princs = $spnego ? {
-      /(true|enabled)/ => [ 'HTTP' ],
-      default          => [ ],
-    }
-    realize(Kerberos::Principal[$internal_princs])
-
-    $includes = inline_template("<%=
-      [requested_princs, internal_princs].flatten.map { |x|
-        \"rkt $kerberos::site::keytab_export_dir/#{x}.keytab\"
-      }.join(\"\n\")
-    %>")
-
-    kerberos::principal { $requested_princs:
-    }
-
-    exec { "ktinject.$title":
-      path     => $kerberos::site::exec_path,
-      command  => "/usr/bin/ktutil <<EOF
-        $includes
-        wkt $keytab
-EOF
-        chown $title $keytab",
-      creates => $keytab,
-      require => [ Kerberos::Principal[$requested_princs],
-                   Kerberos::Principal[$internal_princs] ],
-    }
-  }
-}
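
The host_keytab define above merges several exported per-principal keytabs into a
single host keytab via ktutil's rkt/wkt commands. A minimal sketch of that merge,
with hypothetical principal names:

    # read two exported keytabs, write the merged result, then hand it to the service user
    printf 'rkt %s\nrkt %s\nwkt %s\n' \
        /var/lib/bigtop_keytabs/hdfs.keytab \
        /var/lib/bigtop_keytabs/HTTP.keytab \
        /etc/hdfs.keytab | ktutil
    chown hdfs /etc/hdfs.keytab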

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/client.pp b/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/client.pp
deleted file mode 100644
index b77585f..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/client.pp
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-kerberos::client(
-  $service_state = $hdp::params::cluster_service_state
-) inherits hdp-kerberos::params
-{
-  import 'hdp'
-
-  $hdp::params::service_exists['hdp-kerberos::client'] = true
-
-  $kdc_server = $kdc_host
-  $krb_realm = $kerberos_domain
-  $realm = $kerberos_domain
-
-  if ($hdp::params::service_exists['hdp-kerberos::adminclient'] != true)  {
-    package { $package_name_client:
-      ensure => installed,
-    }
-  }
-
-  if (($hdp::params::service_exists['hdp-kerberos::server'] != true) and
-      ($hdp::params::service_exists['hdp-kerberos::adminclient'] != true) ) {
-    file { "/etc/krb5.conf":
-      content => template('hdp-kerberos/krb5.conf'),
-      owner => "root",
-      group => "root",
-      mode => "0644",
-      require => Package[$package_name_client],
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/init.pp
deleted file mode 100644
index 70ed6ef..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/init.pp
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-kerberos()
-{
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/params.pp
deleted file mode 100644
index 599b8e0..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/params.pp
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-kerberos::params(
-) inherits hdp::params
-{
-  $domain  = 'hadoop.com'
-  $realm = inline_template('<%= @domain.upcase %>')
-  $kdc_server = $hdp::params::hostname
-  $kdc_port = 88
-  $keytab_export_base_dir = '/etc/security/'
-  $keytab_export_dir = "${keytab_export_base_dir}/keytabs"
-
-  $keytab_map = {
-    'hdp-hadoop::namenode' =>  
-      {keytab    => 'nn.service.keytab',
-       primaries => ['nn', 'host', 'HTTP']},
-    'hdp-hadoop::snamenode' =>  
-      {keytab    => 'nn.service.keytab',
-       primaries => ['nn', 'host', 'HTTP']},
-    'hdp-hadoop::datanode' =>  
-      {keytab    => 'dn.service.keytab',
-       primaries => ['dn']},
-    'hdp-hadoop::jobtracker' =>  
-      {keytab    => 'jt.service.keytab',
-       primaries => ['jt']},
-    'hdp-hadoop::tasktracker' =>  
-      {keytab    => 'tt.service.keytab',
-       primaries => ['tt']}
-  }
-
-  case $::operatingsystem {
-    'ubuntu': {
-      $package_name_kdc    = 'krb5-kdc'
-      $service_name_kdc    = 'krb5-kdc'
-      $package_name_admin  = 'krb5-admin-server'
-      $service_name_admin  = 'krb5-admin-server'
-      $package_name_client = 'krb5-user'
-      $exec_path           = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
-      $kdc_etc_path        = '/etc/krb5kdc/'
-     }
-     default: {
-       $package_name_kdc    = 'krb5-server'
-       $service_name_kdc    = 'krb5kdc'
-       $package_name_admin  = 'krb5-libs'
-       $service_name_admin  = 'kadmin'
-       $package_name_client = 'krb5-workstation' 
-       $exec_path           = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/kerberos/sbin:/usr/kerberos/bin'
-       $kdc_etc_path        = '/var/kerberos/krb5kdc/'
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/server.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/server.pp b/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/server.pp
deleted file mode 100644
index ae2f421..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/server.pp
+++ /dev/null
@@ -1,116 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-kerberos::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-kerberos::params
-{ 
-  import 'hdp'
-
-  $hdp::params::service_exists['hdp-kerberos::server'] = true
-
-  $krb_realm = $kerberos_domain
-  $kadmin_pw = "bla123"
-  $kadmin_admin = "kadmin/admin"
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
-    # Install kdc server and client
-    package { $package_name_kdc:
-      ensure => installed
-    }
-
-    # set the realm
-    $realm = $krb_realm
-    # SUHAS: This should be set on all the nodes in addition to kdc server
-    file { "/etc/krb5.conf":
-      content => template('hdp-kerberos/krb5.conf'),
-      owner => "root",
-      group => "root",
-      mode => "0644",
-      require => Package[$package_name_kdc],
-      }
-
-    file { $kdc_etc_path:
-      ensure => directory,
-      owner => root,
-      group => root,
-      mode => "0700",
-      require => Package[$package_name_kdc],
-    }
-
-    file { "${kdc_etc_path}/kdc.conf":
-      content => template('hdp-kerberos/kdc.conf'),
-      require => Package["$package_name_kdc"],
-      owner => "root",
-      group => "root",
-      mode => "0644",
-    }
-
-    # SUHAS: the kadm5.acl file template is missing in gsInstaller
-    # SUHAS: gsInstaller stops iptables at this point (the sequence is not relevant here).
-    file { "${kdc_etc_path}/kadm5.acl":
-      content => template('hdp-kerberos/kadm5.acl'),
-      require => Package["$package_name_kdc"],
-      owner => "root",
-      group => "root",
-      mode => "0644",
-    }
-
-    exec { "kdb5_util":
-      path => $exec_path,
-      command => "rm -f ${kdc_etc_path}/kadm5.keytab; kdb5_util -P x86yzh12 -r ${realm} create -s && kadmin.local -q 'cpw -pw ${kadmin_pw} ${kadmin_admin}'",
-      creates => "${kdc_etc_path}/stash",
-      subscribe => File["${kdc_etc_path}/kdc.conf"],
-      require => [Package[$package_name_kdc], File["${kdc_etc_path}/kdc.conf"], File["/etc/krb5.conf"]]
-    }
-
-    # SUHAS: gsInstaller has an equivalent chkconfig-on step
-    exec { "chkconfig_krb5kdc_on":
-      path => $exec_path,
-      command => "chkconfig krb5kdc on",
-      require => [Package["$package_name_kdc"], File["${kdc_etc_path}/kdc.conf"], Exec["kdb5_util"]],
-    }
-    
-    # Start KDC Server
-    if ($service_state in ['running','stopped']) {
-      service { $service_name_kdc:
-        ensure => $service_state,
-        require => [Exec["chkconfig_krb5kdc_on"]],
-        subscribe => File["${kdc_etc_path}/kdc.conf"],
-        hasrestart => true,
-      }
-
-      # SUHAS: This is to be done on HMC not KDC Server??
-      $se_hack = "setsebool -P kadmind_disable_trans  1 ; setsebool -P krb5kdc_disable_trans 1"
-      service { $service_name_admin:
-        ensure => $service_state,
-        require => Service[$service_name_kdc],
-        hasrestart => true,
-        restart => "${se_hack} ; service ${service_name_admin} restart",
-        start => "${se_hack} ; service ${service_name_admin} start",
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
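
The kdb5_util exec above performs the one-time KDC bootstrap. Expanded into plain
commands (the realm and passwords are placeholders; the service names are the RHEL
ones from params.pp):

    kdb5_util -P "$MASTER_PW" -r EXAMPLE.COM create -s    # create the principal database and stash file
    kadmin.local -q "cpw -pw $KADMIN_PW kadmin/admin"     # set the kadmin/admin password
    service krb5kdc start && service kadmin start         # then bring up the daemons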

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kadm5.acl
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kadm5.acl b/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kadm5.acl
deleted file mode 100644
index d91d076..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kadm5.acl
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This file is the access control list for krb5 administration.
-# After editing this file, run /etc/init.d/krb5-admin-server restart to activate the changes.
-# One common way to set up Kerberos administration is to give any principal
-# ending in /admin full administrative rights. The following line enables this:
-*/admin *

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kdc.conf
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kdc.conf b/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kdc.conf
deleted file mode 100644
index 18f15d5..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kdc.conf
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-default_realm = <%= realm %>
-
-[kdcdefaults]
-    v4_mode = nopreauth
-    kdc_ports = 0
-    kdc_tcp_ports = 88 
-
-[realms]
-    <%= realm %> = {
-        acl_file = <%= kdc_etc_path %>/kadm5.acl
-        dict_file = /usr/share/dict/words
-        admin_keytab = <%= kdc_etc_path %>/kadm5.keytab
-        supported_enctypes = des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal des-cbc-crc:v4 des-cbc-crc:afs3
-        kdc_ports = <%= kdc_port %>
-        database_name = <%= kdc_etc_path %>/principal
-        key_stash_file = <%= kdc_etc_path %>/stash
-        max_life = 10h 0m 0s
-        max_renewable_life = 7d 0h 0m 0s
-        master_key_type = des3-hmac-sha1
-        default_principal_flags = +preauth
-    }

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/krb5.conf
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/krb5.conf b/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/krb5.conf
deleted file mode 100644
index 04ce978..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/krb5.conf
+++ /dev/null
@@ -1,47 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-[libdefaults]
-    default_realm = <%= realm %>
-    dns_lookup_realm = false
-    dns_lookup_kdc = false
-    ticket_lifetime = 24h
-    forwardable = yes
-    udp_preference_limit = 1
-
-[realms]
-    <%= realm %> = {
-        kdc = <%= kdc_server %>:<%= kdc_port %>
-        admin_server = <%= kdc_server %>:749
-        default_domain = <%= domain %>
-    }
-
-[appdefaults] 
-    pam = {
-        debug = false 
-        ticket_lifetime = 36000 
-        renew_lifetime = 36000 
-        forwardable = true 
-        krb4_convert = false 
-    }
-
-[domain_realm]
-    .<%= domain %> = <%= realm %>
-     <%= domain %> = <%= realm %>
-
-[logging]
-    default = FILE:/var/log/krb5libs.log
-    kdc = FILE:/var/log/krb5kdc.log
-    admin_server = FILE:/var/log/kadmind.log
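
A quick way to verify a rendered krb5.conf against the KDC it points at (the
principal name below is a placeholder):

    kinit smokeuser@EXAMPLE.COM    # should prompt for a password and return silently
    klist                          # shows the TGT with the configured ticket_lifetime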

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-kerberos/tests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-kerberos/tests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-kerberos/tests/init.pp
deleted file mode 100644
index fb8434b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-kerberos/tests/init.pp
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-$kerberos_domain = "krb.test.com"
-$kerberos_realm = "KRB.TEST.COM"
-$kerberos_kdc_server = "localhost"
-$kerberos_kdc_port = 88
-# the following turns a node into a fully functional KDC 
-include kerberos::kdc
-# the following opens up the KDC principal database for remote
-# administration (it really should be optional, but it is
-# required for now in order to make kerberos::client::host_keytab
-# work)
-include kerberos::kdc::admin_server
-
-# the following turns a node into a Kerberos client host with...
-include kerberos::client
-# ...an optional host_keytab for as many services as you want:
-kerberos::client::host_keytab { ["host", "hdfs", "mapred"]: }

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/init.pp
deleted file mode 100644
index 6c5cc1d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/init.pp
+++ /dev/null
@@ -1,123 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-monitor-webserver( 
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp::params
-{
-
-  
-  if hdp_is_empty($hdp::params::services_names[httpd]) {
-    hdp_fail("There is no service name for service httpd")
-  } else {
-    $service_name_by_os = $hdp::params::services_names[httpd]
-  }
-
-  if hdp_is_empty($service_name_by_os[$hdp::params::hdp_os_type]) {
-    if hdp_is_empty($service_name_by_os['ALL']) {
-      hdp_fail("There is no service name for service httpd")
-    } else {
-      $service_name = $service_name_by_os['ALL']
-    }
-  } else {
-    $service_name = $service_name_by_os[$hdp::params::hdp_os_type]
-  }
-
-  if hdp_is_empty($hdp::params::pathes[httpd_conf_dir]) {
-    hdp_fail("There is no config dir path for service httpd")
-  } else {
-    $path_by_os = $hdp::params::pathes[httpd_conf_dir]
-  }
-
-  if hdp_is_empty($path_by_os[$hdp::params::hdp_os_type]) {
-    if hdp_is_empty($path_by_os['ALL']) {
-      hdp_fail("There is no config dir path for service httpd")
-    } else {
-      $httpd_conf_dir = $path_by_os['ALL']
-    }
-  } else {
-    $httpd_conf_dir = $path_by_os[$hdp::params::hdp_os_type]
-  }
-
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured', 'restart']) {
-
-    if ($service_state == 'running') {
-      #TODO: refine by using notify/subscribe
-      hdp::exec { 'monitor webserver start':
-        command => "/etc/init.d/$service_name start",
-        unless  => "/etc/init.d/$service_name status",
-        require => Hdp::Exec['enabling keepalive for httpd']
-      }
-
-      hdp::package { 'httpd':
-        size => 64
-      }
-
-      hdp::exec { 'enabling keepalive for httpd':
-        command => "grep -E 'KeepAlive (On|Off)' ${httpd_conf_dir}/httpd.conf && sed -i 's/KeepAlive Off/KeepAlive On/' ${httpd_conf_dir}/httpd.conf || echo 'KeepAlive On' >> ${httpd_conf_dir}/httpd.conf",
-        require => Hdp::Package['httpd']
-      }
-
-    } elsif ($service_state == 'stopped') {
-      # stop should never fail if process already stopped
-      hdp::exec { 'monitor webserver stop':
-        command => "/etc/init.d/$service_name stop"
-      }
-
-    } elsif ($service_state == 'restart') {
-      hdp::exec { 'monitor webserver restart':
-        command => "/etc/init.d/$service_name restart",
-        require => Hdp::Exec['enabling keepalive for httpd']
-      }
-
-      hdp::package { 'httpd':
-        size => 64
-      }
-
-      hdp::exec { 'enabling keepalive for httpd':
-        command => "grep -E 'KeepAlive (On|Off)' ${httpd_conf_dir}/httpd.conf && sed -i 's/KeepAlive Off/KeepAlive On/' ${httpd_conf_dir}/httpd.conf || echo 'KeepAlive On' >> ${httpd_conf_dir}/httpd.conf",
-        require => Hdp::Package['httpd']
-      }
-
-    } elsif ($service_state == 'installed_and_configured') {
-      hdp::package { 'httpd':
-        size => 64
-      }
-
-      hdp::exec { 'enabling keepalive for httpd':
-        command => "grep -E 'KeepAlive (On|Off)' ${httpd_conf_dir}/httpd.conf && sed -i 's/KeepAlive Off/KeepAlive On/' ${httpd_conf_dir}/httpd.conf || echo 'KeepAlive On' >> ${httpd_conf_dir}/httpd.conf",
-        require => Hdp::Package['httpd']
-      }
-    }
-
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
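
The 'enabling keepalive for httpd' one-liner is dense; unrolled, its idempotent
logic is the following (the conf path is illustrative, it varies by OS):

    CONF=/etc/httpd/conf/httpd.conf
    if grep -qE 'KeepAlive (On|Off)' "$CONF"; then
        sed -i 's/KeepAlive Off/KeepAlive On/' "$CONF"   # flip an explicit Off to On
    else
        echo 'KeepAlive On' >> "$CONF"                   # otherwise append the directive
    fi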

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-mysql/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-mysql/files/addMysqlUser.sh b/ambari-agent/src/main/puppet/modules/hdp-mysql/files/addMysqlUser.sh
deleted file mode 100644
index 8c12260..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-mysql/files/addMysqlUser.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-mysqldservice=$1
-mysqldbuser=$2
-mysqldbpasswd=$3
-mysqldbhost=$4
-myhostname=$(hostname -f)
-
-service $mysqldservice start
-echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
-if [ -z "$(mysql -u root --skip-column-names -e "select user from mysql.user where user='$mysqldbuser' and host='$myhostname'")" ]; then
-  echo "Adding user $mysqldbuser@$myhostname";
-  mysql -u root -e "CREATE USER '$mysqldbuser'@'$myhostname' IDENTIFIED BY '$mysqldbpasswd';";
-  mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$myhostname';";
-fi
-mysql -u root -e "flush privileges;"
-service $mysqldservice stop
\ No newline at end of file
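
After the script runs, the grants can be checked with (the user, password, and
host below are illustrative):

    mysql -u root -e "SELECT user, host FROM mysql.user WHERE user = 'hive'"
    mysql -u hive -phivepassword -h db-host.example.com -e "SHOW GRANTS"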

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/init.pp
deleted file mode 100644
index 2af7c53..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/init.pp
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-mysql(){}
-


[05/15] AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/historyserver.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/historyserver.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/historyserver.pp
deleted file mode 100644
index 07a9376..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/historyserver.pp
+++ /dev/null
@@ -1,48 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::historyserver(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-yarn::params
-{
-  $mapred_user = $hdp-yarn::params::mapred_user
-  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in 'installed_and_configured') {
-  
-    include hdp-yarn::initialize
-
-    ##Process package
-    hdp-yarn::package{'mapreduce-historyserver':}
-
-  } elsif ($service_state in ['running','stopped']) {
-
-    include hdp-yarn::initialize
- 
-    hdp-yarn::service{ 'historyserver':
-      ensure       => $service_state,
-      user         => $mapred_user
-    }
-
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/historyserver/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/historyserver/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/historyserver/service_check.pp
deleted file mode 100644
index 885e24b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/historyserver/service_check.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::historyserver::service_check() inherits hdp-yarn::params
-{
-  hdp-yarn::smoketest{'hdp-yarn::smoketest:rm': component_name => 'historyserver'}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/init.pp
deleted file mode 100644
index 5d74f86..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/init.pp
+++ /dev/null
@@ -1,166 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-yarn::initialize()
-{
-  $mapred_user = $hdp-yarn::params::mapred_user
-  $hdfs_user = $hdp::params::hdfs_user
-  $yarn_user = $hdp-yarn::params::yarn_user
-
-  ##Process package
-  hdp-yarn::package{'yarn-common':}
-
-  #Replace limits config file
-  hdp::configfile {"${hdp::params::limits_conf_dir}/yarn.conf":
-    component => 'yarn',
-    owner => 'root',
-    group => 'root',
-    mode => 644    
-  }
-
-  # Create users
-  hdp::user { 'yarn_mapred_user':
-     user_name => $mapred_user
-  }
-
-  hdp::user { 'yarn_hdfs_user':
-     user_name => $hdfs_user
-  }
-
-  hdp::user { 'yarn_yarn_user':
-     user_name => $yarn_user
-  }
-
-  #Generate common configs
-  hdp-yarn::generate_common_configs{'yarn-common-configs':}
-
-  anchor{ 'hdp-yarn::initialize::begin': } ->
-    Hdp::Package['yarn-common'] ->
-    Hdp::Configfile["${hdp::params::limits_conf_dir}/yarn.conf"] ->
-    Hdp::User<|title == 'yarn_hdfs_user' or title == 'yarn_mapred_user' or title == 'yarn_yarn_user'|> ->
-    Hdp-yarn::Generate_common_configs['yarn-common-configs'] ->
-    anchor{ 'hdp-yarn::initialize::end': }
-}
-
-define hdp-yarn::generate_common_configs() {
-
-  $yarn_config_dir = $hdp-yarn::params::conf_dir
-
-  # Generate configs
-  if has_key($::configuration, 'core-site') {
-      configgenerator::configfile{'core-site':
-        modulespath => $yarn_config_dir,
-        filename => 'core-site.xml',
-        module => 'hdp-hadoop',
-        configuration => $::configuration['core-site'],
-        owner => $hdp::params::hdfs_user,
-        group => $hdp::params::user_group,
-        mode => 644
-      }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${yarn_config_dir}/core-site.xml":
-      owner => $hdp::params::hdfs_user,
-      group => $hdp::params::user_group,
-      mode => 644
-    }
-  }
-
-  if has_key($::configuration, 'mapred-site') {
-    configgenerator::configfile{'mapred-site': 
-      modulespath => $yarn_config_dir,
-      filename => 'mapred-site.xml',
-      module => 'hdp-yarn',
-      configuration => $::configuration['mapred-site'],
-      owner => $hdp-yarn::params::yarn_user,
-      group => $hdp::params::user_group,
-      mode => 644
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${yarn_config_dir}/mapred-site.xml":
-      owner => $hdp-yarn::params::yarn_user,
-      group => $hdp::params::user_group,
-      mode => 644
-    }
-  }
-  
-  if has_key($::configuration, 'yarn-site') {
-    configgenerator::configfile{'yarn-site': 
-      modulespath => $yarn_config_dir,
-      filename => 'yarn-site.xml',
-      module => 'hdp-yarn',
-      configuration => $::configuration['yarn-site'],
-      owner => $hdp-yarn::params::yarn_user,
-      group => $hdp::params::user_group,
-      mode => 644
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${yarn_config_dir}/yarn-site.xml":
-      owner => $hdp-yarn::params::yarn_user,
-      group => $hdp::params::user_group,
-      mode => 644
-    }
-  }
-
-  if has_key($::configuration, 'capacity-scheduler') {
-    configgenerator::configfile{'capacity-scheduler': 
-      modulespath => $yarn_config_dir,
-      filename => 'capacity-scheduler.xml',
-      module => 'hdp-yarn',
-      configuration => $::configuration['capacity-scheduler'],
-      owner => $hdp-yarn::params::yarn_user,
-      group => $hdp::params::user_group,
-      mode => 644
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${yarn_config_dir}/capacity-scheduler.xml":
-      owner => $hdp-yarn::params::yarn_user,
-      group => $hdp::params::user_group,
-      mode => 644
-    }
-  }
-
-  hdp::configfile {"${yarn_config_dir}/yarn-env.sh":
-    component => 'yarn',
-    owner => $hdp-yarn::params::yarn_user,
-    group => $hdp::params::user_group,
-    mode => 755
-  }
-
-  hdp::configfile { "${yarn_config_dir}/hadoop-env.sh":
-    mode => 755,
-    owner => $hdp::params::hdfs_user,
-    group => $hdp::params::user_group,
-    component => 'hadoop'
-  }
-
-  if ($hdp::params::security_enabled == true) {
-    $container_executor = "${hdp::params::yarn_container_bin}/container-executor"
-    file { $container_executor:
-      ensure => present,
-      group => $hdp-yarn::params::yarn_executor_container_group,
-      mode => 6050
-    }
-
-    hdp::configfile { "${yarn_config_dir}/container-executor.cfg" :
-      component => 'yarn',
-      owner => 'root',
-      group   => $hdp::params::user_group,
-      mode  => '0644'
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/mapred2/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/mapred2/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/mapred2/service_check.pp
deleted file mode 100644
index 3ada16e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/mapred2/service_check.pp
+++ /dev/null
@@ -1,69 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::mapred2::service_check() inherits hdp-yarn::params
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $hadoopMapredExamplesJarName = $hdp-yarn::params::hadoopMapredExamplesJarName
-  $jar_path = "$hdp::params::hadoop_mapred2_jar_location/$hadoopMapredExamplesJarName"
-  $input_file = "/user/${smoke_test_user}/mapredsmokeinput"
-  $output_file = "/user/${smoke_test_user}/mapredsmokeoutput"
-  $hadoop_conf_dir = $hdp::params::hadoop_conf_dir
-
-  $cleanup_cmd = "fs -rm -r -f ${output_file} ${input_file}"
-  $create_file_cmd = "fs -put /etc/passwd ${input_file}"
-  $test_cmd = "fs -test -e ${output_file}"
-  $run_wordcount_job = "jar $jar_path wordcount ${input_file} ${output_file}"
-
-  anchor { 'hdp-yarn::mapred2::service_check::begin':}
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::cleanup_before':
-    command   => $cleanup_cmd,
-    tries     => 1,
-    try_sleep => 5,
-    user      => $smoke_test_user
-  }
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::create_file':
-    command   => $create_file_cmd,
-    tries     => 1,
-    try_sleep => 5,
-    user      => $smoke_test_user
-  }
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::run_wordcount':
-    command   => $run_wordcount_job,
-    tries     => 1,
-    try_sleep => 5,
-    user      => $smoke_test_user,
-    logoutput => "true"
-  }
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    user        => $smoke_test_user
-  }
-
-  anchor { 'hdp-yarn::mapred2::service_check::end':}
-
-  Anchor['hdp-yarn::mapred2::service_check::begin'] -> Hdp-hadoop::Exec-hadoop['mapred::service_check::cleanup_before'] -> Hdp-hadoop::Exec-hadoop['mapred::service_check::create_file'] -> Hdp-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'] -> Hdp-hadoop::Exec-hadoop['mapred::service_check::test'] -> Anchor['hdp-yarn::mapred2::service_check::end']
-
-}

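For reference, the removed service check reduces to four hadoop invocations run as the smoke user: remove any leftover test paths, upload /etc/passwd as input, run wordcount from the examples jar, and assert the output directory exists. A minimal Python sketch of that sequence, assuming a hadoop client on PATH; 'ambari-qa' and the jar glob are assumptions standing in for the manifest's parameters:

import glob
import subprocess

def run_as(user, hadoop_args):
    # Counterpart of hdp-hadoop::exec-hadoop with user => $smoke_test_user
    subprocess.check_call(["su", "-", user, "-c", "hadoop " + hadoop_args])

def mapred2_service_check(smoke_user="ambari-qa",
                          jar_glob="/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples-2.*.jar"):
    inp = "/user/%s/mapredsmokeinput" % smoke_user
    out = "/user/%s/mapredsmokeoutput" % smoke_user
    jar = glob.glob(jar_glob)[0]               # raises IndexError if no jar found
    run_as(smoke_user, "fs -rm -r -f %s %s" % (out, inp))           # cleanup_before
    run_as(smoke_user, "fs -put /etc/passwd %s" % inp)              # create_file
    run_as(smoke_user, "jar %s wordcount %s %s" % (jar, inp, out))  # run_wordcount
    run_as(smoke_user, "fs -test -e %s" % out)                      # test
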
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/mapreducev2_client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/mapreducev2_client.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/mapreducev2_client.pp
deleted file mode 100644
index 2cf607d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/mapreducev2_client.pp
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-yarn::mapreducev2_client(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-yarn::params
-{
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in 'installed_and_configured') {
-
-    include hdp-yarn::initialize
-
-    hdp-yarn::package{'hadoop-mapreduce-client':}
-
-    hdp::configfile {"${hdp::params::limits_conf_dir}/mapreduce.conf":
-      component => 'yarn',
-      owner => 'root',
-      group => 'root',
-      mode => 644,
-      require => Hdp-yarn::Package['hadoop-mapreduce-client']
-    }
-
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/nodemanager.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/nodemanager.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/nodemanager.pp
deleted file mode 100644
index 67856bb..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/nodemanager.pp
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::nodemanager(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-yarn::params
-{
-  $yarn_user = $hdp-yarn::params::yarn_user
-  $nm_local_dirs = $hdp-yarn::params::nm_local_dirs
-  $nm_log_dirs = $hdp-yarn::params::nm_log_dirs
-  $yarn_log_dir_prefix = $hdp-yarn::params::yarn_log_dir_prefix
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in 'installed_and_configured') {
-  
-    include hdp-yarn::initialize
-
-    ##Process package
-    hdp-yarn::package{'yarn-nodemanager':}
-
-    hdp::configfile {"${hdp::params::limits_conf_dir}/mapreduce.conf":
-      component => 'yarn',
-      owner => 'root',
-      group => 'root',
-      mode => 644,
-      require => Hdp-yarn::Package['yarn-nodemanager']
-    }
-
-  } elsif ($service_state in ['running','stopped']) {
-
-    include hdp-yarn::initialize
-
-    # To avoid duplicate resource definitions
-    $nm_dirs = hdp_set_from_comma_list("${nm_local_dirs},${nm_log_dirs}", "$yarn_log_dir_prefix")
-
-    hdp-yarn::nodemanager::create_nm_dirs { $nm_dirs:
-      service_state => $service_state
-    }
-
-    hdp-yarn::service{ 'nodemanager':
-      ensure       => $service_state,
-      user         => $yarn_user
-    }
-
-    anchor{"hdp-yarn::nodemanager::begin" : } ->
-    Hdp-yarn::Nodemanager::Create_nm_dirs<||> ->
-    Hdp-yarn::Service['nodemanager'] ->
-    anchor{"hdp-yarn::nodemanager::end": }
-
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-
-define hdp-yarn::nodemanager::create_nm_dirs($service_state) {
-  $dirs = hdp_array_from_comma_list($name)
-  hdp::directory_recursive_create_ignore_failure { $dirs :
-    owner => $hdp-yarn::params::yarn_user,
-    context_tag => 'yarn_service',
-    service_state => $service_state,
-    force => true
-  }
-}

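The create_nm_dirs define relies on hdp_set_from_comma_list to merge the local-dir and log-dir comma lists and de-duplicate them, so each directory is declared exactly once, and directory creation failures are deliberately tolerated. A small Python sketch of that merge-and-create logic; the try/except stands in for the *_ignore_failure semantics and 'yarn' is the assumed owner:

import os
import pwd

def create_nm_dirs(nm_local_dirs, nm_log_dirs, owner="yarn"):
    # hdp_set_from_comma_list: merge both lists, strip blanks, de-duplicate
    dirs = sorted({d.strip()
                   for d in (nm_local_dirs + "," + nm_log_dirs).split(",")
                   if d.strip()})
    pw = pwd.getpwnam(owner)
    for d in dirs:
        try:
            os.makedirs(d)
            os.chown(d, pw.pw_uid, pw.pw_gid)
        except OSError:
            pass  # directory_recursive_create_ignore_failure: keep going
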
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/package.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/package.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/package.pp
deleted file mode 100644
index 154959d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/package.pp
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp-yarn::package()
-{
-  hdp::package{ $name:
-    ensure       => present,
-    package_type => $package
-  }
-  anchor{ "hdp-yarn::package::${name}::begin": } -> Hdp::Package[$name] -> anchor{ "hdp-yarn::package::${name}::end": }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/params.pp
deleted file mode 100644
index 9fa897a..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/params.pp
+++ /dev/null
@@ -1,59 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::params(
-) inherits hdp-hadoop::params 
-{
-
-  $conf_dir = $hdp::params::yarn_conf_dir
-  $stack_version = $hdp::params::stack_version
-  $smoke_test_user = $hdp::params::smokeuser
-  ## security params
-  $security_enabled = $hdp::params::security_enabled
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-  $yarn_executor_container_group = hdp_default("yarn-site/yarn.nodemanager.linux-container-executor.group","hadoop")
-  $kinit_cmd = "${hdp::params::kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user};"
-  $rm_host = $hdp::params::rm_host
-  $rm_port = $hdp::rm_port
-  $rm_https_port = $hdp::rm_https_port
-
-  ## yarn-env 
-  $hadoop_libexec_dir = $hdp-hadoop::params::hadoop_libexec_dir
-  $hadoop_yarn_home = hdp_default("hadoop_yarn_home","/usr/lib/hadoop-yarn")
-  $yarn_heapsize = hdp_default("yarn_heapsize","1024")
-  $resourcemanager_heapsize = hdp_default("resourcemanager_heapsize","1024")
-  $nodemanager_heapsize = hdp_default("nodemanager_heapsize","1024")
-
-  $yarn_log_dir_prefix = hdp_default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
-  $yarn_pid_dir_prefix = hdp_default("yarn_pid_dir_prefix","/var/run/hadoop-yarn")
-  
-  ## yarn-site
-  $rm_webui_address = "${rm_host}:${rm_port}"
-  $rm_webui_https_address = "${rm_host}:${rm_https_port}"
-  $nm_webui_address = hdp_default("yarn-site/yarn.nodemanager.webapp.address", "0.0.0.0:8042")
-  $hs_webui_address = hdp_default("mapred-site/mapreduce.jobhistory.webapp.address", "0.0.0.0:19888")
-  
-  $nm_local_dirs = hdp_default("yarn-site/yarn.nodemanager.local-dirs", "${hadoop_tmp_dir}/nm-local-dir")
-  $nm_log_dirs = hdp_default("yarn-site/yarn.nodemanager.log-dirs", "/var/log/hadoop-yarn/yarn")
-
-  ##smoke test configs
-  $distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
-  $hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
-}

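Nearly every value in this params class goes through hdp_default, which resolves a key against the cluster's desired configuration -- optionally namespaced by config file, as in "yarn-site/yarn.nodemanager.local-dirs" -- and falls back to a hard-coded default. A sketch of that lookup in Python, with a plain dict standing in for the agent's view of the configs:

def hdp_default(configurations, key, default=None):
    # Keys of the form "<config-file>/<property>" are namespaced lookups
    if "/" in key:
        config_file, prop = key.split("/", 1)
        return configurations.get(config_file, {}).get(prop, default)
    return configurations.get(key, default)

configs = {"yarn-site": {"yarn.nodemanager.webapp.address": "0.0.0.0:8042"}}
nm_webui = hdp_default(configs, "yarn-site/yarn.nodemanager.webapp.address", "0.0.0.0:8042")
yarn_heap = hdp_default(configs, "yarn_heapsize", "1024")  # falls back to the default
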
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/resourcemanager.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/resourcemanager.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/resourcemanager.pp
deleted file mode 100644
index 2ecf442..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/resourcemanager.pp
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::resourcemanager(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-yarn::params
-{
-  $yarn_user = $hdp-yarn::params::yarn_user
-  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in 'installed_and_configured') {
-  
-    include hdp-yarn::initialize
-
-    ##Process package
-    hdp-yarn::package{'yarn-resourcemanager':}
-
-    hdp::configfile {"${hdp::params::limits_conf_dir}/mapreduce.conf":
-      component => 'yarn',
-      owner => 'root',
-      group => 'root',
-      mode => 644,
-      require => Hdp-yarn::Package['yarn-resourcemanager']
-    }
-
-  } elsif ($service_state in ['running','stopped']) {
-  
-    include hdp-yarn::initialize
- 
-    hdp-yarn::service{ 'resourcemanager':
-      ensure       => $service_state,
-      user         => $yarn_user
-    }
-
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/resourcemanager/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/resourcemanager/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/resourcemanager/service_check.pp
deleted file mode 100644
index c5386c3..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/resourcemanager/service_check.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::resourcemanager::service_check() inherits hdp-yarn::params
-{
-  hdp-yarn::smoketest{'hdp-yarn::smoketest:rm': component_name => 'resourcemanager'}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/service.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/service.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/service.pp
deleted file mode 100644
index 3589829..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/service.pp
+++ /dev/null
@@ -1,116 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp-yarn::service(
-  $ensure = 'running',
-  $user,
-  $initial_wait = undef,
-  $create_pid_dir = true,
-  $create_log_dir = true
-)
-{
-
-  $security_enabled = $hdp::params::security_enabled
-  
-  if ($name == 'historyserver') {
-    $log_dir = "${hdp-yarn::params::mapred_log_dir_prefix}/${user}"
-    $pid_dir = "${hdp-yarn::params::mapred_pid_dir_prefix}/${user}"
-    $daemon = "${hdp::params::mapred_bin}/mr-jobhistory-daemon.sh"
-    $pid_file = "${pid_dir}/mapred-${user}-${name}.pid"
-    $job_summary_log = "${hdp-yarn::params::mapred_log_dir_prefix}/${user}/hadoop-mapreduce.jobsummary.log"
-  } else {
-    $log_dir = "${hdp-yarn::params::yarn_log_dir_prefix}/${user}"
-    $pid_dir = "${hdp-yarn::params::yarn_pid_dir_prefix}/${user}"
-    $daemon = "${hdp::params::yarn_bin}/yarn-daemon.sh"
-    $pid_file = "${pid_dir}/yarn-${user}-${name}.pid"
-    $job_summary_log = "${hdp-yarn::params::yarn_log_dir_prefix}/${user}/hadoop-mapreduce.jobsummary.log"
-  }
-  
-  $hadoop_libexec_dir = $hdp-yarn::params::hadoop_libexec_dir
-   
-  $cmd = "export HADOOP_LIBEXEC_DIR=${hadoop_libexec_dir} && ${daemon} --config ${hdp-yarn::params::conf_dir}"
-  
-  if ($ensure == 'running') {
-    if ($run_as_root == true) {
-      $daemon_cmd = "${cmd} start ${name}"
-    } else {
-      $daemon_cmd = "su - ${user} -c  '${cmd} start ${name}'"
-    }
-    $service_is_up = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  } elsif ($ensure == 'stopped') {
-    if ($run_as_root == true) {
-      $daemon_cmd = "${cmd} stop ${name} && rm -f ${pid_file}"
-    } else {
-      $daemon_cmd = "su - ${user} -c  '${cmd} stop ${name}' && rm -f ${pid_file}"
-    }
-    $service_is_up = undef
-  } else {
-    $daemon_cmd = undef
-  }
- 
-   if ($create_pid_dir == true) {
-    hdp::directory_recursive_create { $pid_dir: 
-      owner       => $user,
-      context_tag => 'yarn_service',
-      service_state => $ensure,
-      force => true
-    }
-  }
- 
-  if ($create_log_dir == true) {
-    hdp::directory_recursive_create { $log_dir: 
-      owner       => $user,
-      context_tag => 'yarn_service',
-      service_state => $ensure,
-      force => true
-    }
-
-    file {$job_summary_log:
-      path => $job_summary_log,
-      owner => $user,
-    }
-  }
- 
-  if ($daemon_cmd != undef) {  
-    hdp::exec { $daemon_cmd:
-      command      => $daemon_cmd,
-      unless       => $service_is_up,
-      initial_wait => $initial_wait
-    }
-  }
-
-  anchor{"hdp-yarn::service::${name}::begin":}
-  anchor{"hdp-yarn::service::${name}::end":}
-  if ($daemon_cmd != undef) {
-    Anchor["hdp-yarn::service::${name}::begin"] -> Hdp::Directory_recursive_create<|title == $pid_dir or title == $log_dir|> -> File[$job_summary_log] -> Hdp::Exec[$daemon_cmd] -> Anchor["hdp-yarn::service::${name}::end"]
-
-  }
-  if ($ensure == 'running') {
-    #TODO: look at Puppet resource retry and retry_sleep
-    #TODO: can make sleep contingent on $name
-    $sleep = 5
-    $post_check = "sleep ${sleep}; ${service_is_up}"
-    hdp::exec { $post_check:
-      command => $post_check,
-      unless  => $service_is_up
-    }
-    Hdp::Exec[$daemon_cmd] -> Hdp::Exec[$post_check] -> Anchor["hdp-yarn::service::${name}::end"]
-  }  
-}

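This define is the generic YARN daemon driver: it picks mapred-prefixed log/pid locations for the history server and yarn-prefixed ones otherwise, starts or stops the daemon through the matching *-daemon.sh as the service user, and uses a pid-file-plus-ps probe both as the exec's 'unless' guard and as a post-start health check. Note that $run_as_root is not a parameter of this define, so it only resolves if set at some outer scope; otherwise the su branch is always taken. A condensed Python sketch of the start path under those assumptions:

import subprocess
import time

def pid_alive(pid_file):
    # ls ${pid_file} && ps `cat ${pid_file}`: pid file exists and process is up
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        subprocess.check_call(["ps", "-p", str(pid)], stdout=subprocess.DEVNULL)
        return True
    except (OSError, ValueError, subprocess.CalledProcessError):
        return False

def start_daemon(name, user, daemon, conf_dir, libexec_dir, pid_file):
    if pid_alive(pid_file):  # the 'unless' guard: already running, do nothing
        return
    cmd = ("export HADOOP_LIBEXEC_DIR=%s && %s --config %s start %s"
           % (libexec_dir, daemon, conf_dir, name))
    subprocess.check_call(["su", "-", user, "-c", cmd])
    time.sleep(5)            # $post_check: sleep, then re-probe
    if not pid_alive(pid_file):
        raise RuntimeError("%s did not stay up after start" % name)
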
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/smoketest.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/smoketest.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/smoketest.pp
deleted file mode 100644
index e2faef4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/smoketest.pp
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp-yarn::smoketest(
-  $component_name = undef
-)
-{
-  $rm_webui_address = $hdp-yarn::params::rm_webui_address
-  $rm_webui_https_address = $hdp-yarn::params::rm_webui_https_address
-  $nm_webui_address = $hdp-yarn::params::nm_webui_address
-  $hs_webui_address = $hdp-yarn::params::hs_webui_address
-  
-  $hadoop_ssl_enabled = $hdp-hadoop::params::hadoop_ssl_enabled
-
-  if ($component_name == 'resourcemanager') {
-    $component_type = 'rm'
-    if ($hadoop_ssl_enabled == "true") {
-      $component_address = $rm_webui_https_address
-    } else {
-      $component_address = $rm_webui_address
-    }
-  } elsif ($component_name == 'nodemanager') {
-    $component_type = 'nm'
-    $component_address = $nm_webui_address
-  } elsif ($component_name == 'historyserver') {
-    $component_type = 'hs'
-    $component_address = $hs_webui_address
-  } else {
-    hdp_fail("Unsupported component name: $component_name")
-  }
-
-  $security_enabled = $hdp::params::security_enabled
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-  $smoke_test_user = $hdp::params::smokeuser
-  $kinit_cmd = "${hdp::params::kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user};"
-
-
-  $validateStatusFileName = "validateYarnComponentStatus.py"
-  $validateStatusFilePath = "/tmp/$validateStatusFileName"
-
-  $validateStatusCmd = "$validateStatusFilePath $component_type -p $component_address -s $hadoop_ssl_enabled"
-
-  if ($security_enabled == true) {
-    $smoke_cmd = "${kinit_cmd}  $validateStatusCmd"
-  } else {
-    $smoke_cmd = $validateStatusCmd
-  }
-
-
-  file { $validateStatusFilePath:
-    ensure => present,
-    source => "puppet:///modules/hdp-yarn/$validateStatusFileName",
-    mode => '0755'
-  }
-
-  exec { $validateStatusFilePath:
-    command   => $smoke_cmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true",
-    user      => $smoke_test_user
-  }
-  anchor{"hdp-yarn::smoketest::begin":} -> File[$validateStatusFilePath] -> Exec[$validateStatusFilePath] -> anchor{"hdp-yarn::smoketest::end":}
-}

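The smoketest define stages validateYarnComponentStatus.py into /tmp and runs it as the smoke user against the component's web address, preferring the HTTPS ResourceManager address when hadoop_ssl_enabled is "true" and prefixing a kinit when security is enabled. A Python sketch of just the command construction, with an addresses dict standing in for the four *_webui_address params:

def build_smoke_cmd(component, addresses, ssl_enabled=False,
                    security_enabled=False, kinit_cmd=""):
    type_by_name = {"resourcemanager": "rm", "nodemanager": "nm",
                    "historyserver": "hs"}
    if component not in type_by_name:
        raise ValueError("Unsupported component name: %s" % component)
    comp_type = type_by_name[component]
    if component == "resourcemanager" and ssl_enabled:
        address = addresses["rm_https"]          # $rm_webui_https_address
    else:
        address = addresses[comp_type]
    check = ("/tmp/validateYarnComponentStatus.py %s -p %s -s %s"
             % (comp_type, address, str(ssl_enabled).lower()))
    return (kinit_cmd + "  " + check) if security_enabled else check
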
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/yarn/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/yarn/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/yarn/service_check.pp
deleted file mode 100644
index 4f2fffe..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/yarn/service_check.pp
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::yarn::service_check() inherits hdp-yarn::params
-{
-
-  $jar_path = "$hadoop_yarn_home/$distrAppJarName"
-  $run_yarn_check_cmd = "node -list"
-  
-  ## Check availability of REST api
-  hdp-yarn::smoketest{'hdp-yarn::smoketest:rm': component_name => 'resourcemanager'}
-  
-  ## Run distributed shell application check
-  hdp-hadoop::exec-hadoop { 'hdp-yarn::yarn::service_check':
-    path        => '/usr/bin/yarn',
-    command     => $run_yarn_check_cmd,
-    user        => $smoke_test_user
-  }
-  
-  anchor{"hdp-yarn::yarn::service_check::begin":} -> Hdp-yarn::Smoketest['hdp-yarn::smoketest:rm'] ->  Hdp-hadoop::Exec-hadoop['hdp-yarn::yarn::service_check'] -> anchor{"hdp-yarn::yarn::service_check::end":}
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/yarn_client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/yarn_client.pp b/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/yarn_client.pp
deleted file mode 100644
index 8914233..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/manifests/yarn_client.pp
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-yarn::yarn_client(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-yarn::params
-{  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in 'installed_and_configured') {
-  
-    include hdp-yarn::initialize
-
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/container-executor.cfg.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/container-executor.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/container-executor.cfg.erb
deleted file mode 100644
index c6a2479..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/container-executor.cfg.erb
+++ /dev/null
@@ -1,22 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-yarn.nodemanager.local-dirs=<%=scope.function_hdp_default(["yarn-site/yarn.nodemanager.local-dirs","/hadoop/yarn"])%>
-yarn.nodemanager.log-dirs=<%=scope.function_hdp_default(["yarn-site/yarn.nodemanager.log-dirs","/var/log/hadoop/yarn"])%>
-yarn.nodemanager.linux-container-executor.group=<%=scope.function_hdp_default(["yarn-site/yarn.nodemanager.linux-container-executor.group","hadoop"])%>
-banned.users = hdfs,yarn,mapred,bin
-min.user.id=1000

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/mapreduce.conf.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/mapreduce.conf.erb b/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/mapreduce.conf.erb
deleted file mode 100644
index 222938e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/mapreduce.conf.erb
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-<%=scope.function_hdp_template_var("mapred_user")%>   - nofile 32768
-<%=scope.function_hdp_template_var("mapred_user")%>   - nproc  65536

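For illustration: assuming the conventional mapred user, the template above renders to two /etc/security/limits.d-style entries,

    mapred   - nofile 32768
    mapred   - nproc  65536

raising the open-file and process ceilings for the MapReduce daemons. The yarn.conf.erb template further down repeats the same pattern for the YARN user.
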
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/yarn-env.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/yarn-env.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/yarn-env.sh.erb
deleted file mode 100644
index b9e9398..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/yarn-env.sh.erb
+++ /dev/null
@@ -1,119 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-export HADOOP_YARN_HOME=<%=scope.function_hdp_template_var("hadoop_yarn_home")%>
-export YARN_LOG_DIR=<%=scope.function_hdp_template_var("yarn_log_dir_prefix")%>/$USER
-export YARN_PID_DIR=<%=scope.function_hdp_template_var("yarn_pid_dir_prefix")%>/$USER
-export HADOOP_LIBEXEC_DIR=<%=scope.function_hdp_template_var("hadoop_libexec_dir")%>
-export JAVA_HOME=<%=scope.function_hdp_template_var("::hdp::params::java64_home")%>
-
-# User for YARN daemons
-export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
-
-# resolve links - $0 may be a softlink
-export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
-
-# some Java parameters
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
-  JAVA_HOME=$JAVA_HOME
-fi
-
-if [ "$JAVA_HOME" = "" ]; then
-  echo "Error: JAVA_HOME is not set."
-  exit 1
-fi
-
-JAVA=$JAVA_HOME/bin/java
-JAVA_HEAP_MAX=-Xmx1000m
-
-# For setting YARN specific HEAP sizes please use this
-# Parameter and set appropriately
-YARN_HEAPSIZE=<%=scope.function_hdp_template_var("yarn_heapsize")%>
-
-# check envvars which might override default args
-if [ "$YARN_HEAPSIZE" != "" ]; then
-  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
-fi
-
-# Resource Manager specific parameters
-
-# Specify the max Heapsize for the ResourceManager using a numerical value
-# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_RESOURCEMANAGER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_RESOURCEMANAGER_HEAPSIZE=<%=scope.function_hdp_template_var("resourcemanager_heapsize")%>
-
-# Specify the JVM options to be used when starting the ResourceManager.
-# These options will be appended to the options specified as YARN_OPTS
-# and therefore may override any similar flags set in YARN_OPTS
-#export YARN_RESOURCEMANAGER_OPTS=
-
-# Node Manager specific parameters
-
-# Specify the max Heapsize for the NodeManager using a numerical value
-# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_NODEMANAGER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_NODEMANAGER_HEAPSIZE=<%=scope.function_hdp_template_var("nodemanager_heapsize")%>
-
-# Specify the JVM options to be used when starting the NodeManager.
-# These options will be appended to the options specified as YARN_OPTS
-# and therefore may override any similar flags set in YARN_OPTS
-#export YARN_NODEMANAGER_OPTS=
-
-# so that filenames w/ spaces are handled correctly in loops below
-IFS=
-
-
-# default log directory & file
-if [ "$YARN_LOG_DIR" = "" ]; then
-  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
-fi
-if [ "$YARN_LOGFILE" = "" ]; then
-  YARN_LOGFILE='yarn.log'
-fi
-
-# default policy file for service-level authorization
-if [ "$YARN_POLICYFILE" = "" ]; then
-  YARN_POLICYFILE="hadoop-policy.xml"
-fi
-
-# restore ordinary behaviour
-unset IFS
-
-
-YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
-YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
-YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
-YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
-YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
-YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-fi
-YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/yarn.conf.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/yarn.conf.erb b/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/yarn.conf.erb
deleted file mode 100644
index bb0c951..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-yarn/templates/yarn.conf.erb
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-<%=scope.function_hdp_template_var("yarn_user")%>   - nofile 32768
-<%=scope.function_hdp_template_var("yarn_user")%>   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkEnv.sh b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkEnv.sh
deleted file mode 100644
index 07017e1..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkEnv.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script should be sourced into other zookeeper
-# scripts to setup the env variables
-
-# We use ZOOCFGDIR if defined,
-# otherwise we use /etc/zookeeper
-# or the conf directory that is
-# a sibling of this script's directory
-if [ "x$ZOOCFGDIR" = "x" ]
-then
-    if [ -d "/etc/zookeeper" ]
-    then
-        ZOOCFGDIR="/etc/zookeeper"
-    else
-        ZOOCFGDIR="$ZOOBINDIR/../conf"
-    fi
-fi
-
-if [ "x$ZOOCFG" = "x" ]
-then
-    ZOOCFG="zoo.cfg"
-fi
-
-ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
-
-if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
-then
-    . "$ZOOCFGDIR/zookeeper-env.sh"
-fi
-
-if [ "x${ZOO_LOG_DIR}" = "x" ]
-then
-    ZOO_LOG_DIR="."
-fi
-
-if [ "x${ZOO_LOG4J_PROP}" = "x" ]
-then
-    ZOO_LOG4J_PROP="INFO,CONSOLE"
-fi
-
-#add the zoocfg dir to classpath
-CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
-
-for i in "$ZOOBINDIR"/../src/java/lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../zookeeper-*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work for developers
-for d in "$ZOOBINDIR"/../build/lib/*.jar
-do
-   CLASSPATH="$d:$CLASSPATH"
-done
-
-#make it work for developers
-CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
-
-case "`uname`" in
-    CYGWIN*) cygwin=true ;;
-    *) cygwin=false ;;
-esac
-
-if $cygwin
-then
-    CLASSPATH=`cygpath -wp "$CLASSPATH"`
-fi
-
-#echo "CLASSPATH=$CLASSPATH"

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkServer.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkServer.sh b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkServer.sh
deleted file mode 100644
index 49ceb4d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkServer.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# If this script is run out of /usr/bin or some other system bin directory
-# it should be linked to and not copied. Things like java jar files are found
-# relative to the canonical path of this script.
-#
-
-# See the following page for extensive details on setting
-# up the JVM to accept JMX remote management:
-# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-# by default we allow local JMX connections
-if [ "x$JMXLOCALONLY" = "x" ]
-then
-    JMXLOCALONLY=false
-fi
-
-if [ "x$JMXDISABLE" = "x" ]
-then
-    echo "JMX enabled by default"
-    # for some reason these two options are necessary on jdk6 on Ubuntu
-    #   according to the docs they are not necessary, but otherwise jconsole cannot
-    #   do a local attach
-    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
-else
-    echo "JMX disabled by user request"
-    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
-fi
-
-# Only follow symlinks if readlink supports it
-if readlink -f "$0" > /dev/null 2>&1
-then
-  ZOOBIN=`readlink -f "$0"`
-else
-  ZOOBIN="$0"
-fi
-ZOOBINDIR=`dirname "$ZOOBIN"`
-
-. "$ZOOBINDIR"/zkEnv.sh
-
-if [ "x$2" != "x" ]
-then
-    ZOOCFG="$ZOOCFGDIR/$2"
-fi
-
-if $cygwin
-then
-    ZOOCFG=`cygpath -wp "$ZOOCFG"`
-    # cygwin has a "kill" in the shell itself, gets confused
-    KILL=/bin/kill
-else
-    KILL=kill
-fi
-
-echo "Using config: $ZOOCFG"
-
-ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
-
-
-case $1 in
-start)
-    echo  "Starting zookeeper ... "
-    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
-    /bin/echo -n $! > "$ZOOPIDFILE"
-    echo STARTED
-    ;;
-stop)
-    echo "Stopping zookeeper ... "
-    if [ ! -f "$ZOOPIDFILE" ]
-    then
-    echo "error: could not find file $ZOOPIDFILE"
-    exit 1
-    else
-    $KILL -9 $(cat "$ZOOPIDFILE")
-    rm "$ZOOPIDFILE"
-    echo STOPPED
-    fi
-    ;;
-upgrade)
-    shift
-    echo "upgrading the servers to 3.*"
-    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
-    echo "Upgrading ... "
-    ;;
-restart)
-    shift
-    "$0" stop ${@}
-    sleep 3
-    "$0" start ${@}
-    ;;
-status)
-    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
-    if [ "x$STAT" = "x" ]
-    then
-        echo "Error contacting service. It is probably not running."
-    else
-        echo $STAT
-    fi
-    ;;
-*)
-    echo "Usage: $0 {start|stop|restart|status}" >&2
-
-esac

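The status branch above uses ZooKeeper's four-letter-word protocol: send "stat" to the client port and keep the Mode line (standalone, leader, or follower). The same probe as a few lines of self-contained Python:

import socket

def zk_mode(host="localhost", port=2181, timeout=5.0):
    # Send the 'stat' four-letter word and return the "Mode: ..." line, or None
    with socket.create_connection((host, port), timeout=timeout) as sock:
        sock.sendall(b"stat")
        data = b""
        while True:
            chunk = sock.recv(4096)
            if not chunk:  # ZooKeeper closes the connection after responding
                break
            data += chunk
    for line in data.decode("utf-8", "replace").splitlines():
        if line.startswith("Mode:"):
            return line
    return None  # error contacting service; it is probably not running
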
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkService.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkService.sh b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkService.sh
deleted file mode 100644
index 32dfce4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkService.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-zkcli_script=$1
-user=$2
-conf_dir=$3
-su - $user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkSmoke.sh b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkSmoke.sh
deleted file mode 100755
index 00350ff..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkSmoke.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smoke_script=$1
-smoke_user=$2
-conf_dir=$3
-client_port=$4
-security_enabled=$5
-kinit_path_local=$6
-smoke_user_keytab=$7
-export ZOOKEEPER_EXIT_CODE=0
-test_output_file=/tmp/zkSmoke.out
-errors_expr="ERROR|Exception"
-acceptable_expr="SecurityException"
-zkhosts=` grep "^server\.[[:digit:]]"  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
-zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
-echo "zk_node1=$zk_node1"
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smoke_user"
-  su - $smoke_user -c "$kinitcmd"
-fi
-
-function verify_output() {
-  if [ -f $test_output_file ]; then
-    errors=`grep -E $errors_expr $test_output_file | grep -v $acceptable_expr`
-    if [ "$?" -eq 0 ]; then
-      echo "Error found in the zookeeper smoke test. Exiting."
-      echo $errors
-      exit 1
-    fi
-  fi
-}
-
-# Delete /zk_smoketest znode if exists
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${smoke_script} -server $zk_node1:$client_port" 2>&1>$test_output_file
-# Create /zk_smoketest znode on one zookeeper server
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${smoke_script} -server $zk_node1:$client_port" 2>&1>>$test_output_file
-verify_output
-
-for i in $zkhosts ; do
-  echo "Running test on host $i"
-  # Verify the data associated with znode across all the nodes in the zookeeper quorum
-  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port"
-  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${smoke_script} -server $i:$client_port"
-  output=$(su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port")
-  echo $output | grep smoke_data
-  if [[ $? -ne 0 ]] ; then
-    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
-    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
-  fi
-done
-
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${smoke_script} -server $zk_node1:$client_port"
-if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
-  echo "Zookeeper Smoke Test: Failed" 
-else
-   echo "Zookeeper Smoke Test: Passed" 
-fi
-exit $ZOOKEEPER_EXIT_CODE

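zkSmoke.sh checks quorum consistency end to end: create /zk_smoketest with known data through the first server, then read it back through every server listed in zoo.cfg and count mismatches. A compact Python sketch of the read-back loop, driving the same zkCli-style client through su exactly as the shell script does; zkcli and conf_dir are parameters, as above:

import subprocess

def zk_quorum_check(hosts, smoke_user, zkcli, conf_dir, port=2181):
    failures = 0
    for host in hosts:
        cmd = ("source %s/zookeeper-env.sh ; "
               "echo 'get /zk_smoketest' | %s -server %s:%s"
               % (conf_dir, zkcli, host, port))
        result = subprocess.run(["su", "-", smoke_user, "-c", cmd],
                                capture_output=True, text=True)
        if "smoke_data" not in result.stdout:
            print("Data for /zk_smoketest is not consistent on host %s" % host)
            failures += 1
    return failures  # plays the role of ZOOKEEPER_EXIT_CODE
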
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/client.pp b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/client.pp
deleted file mode 100644
index 23eb15b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/client.pp
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-zookeeper::client(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp::params
-{
-  $package_type = $hdp::params::packages
-
-  if ($service_state == 'no_op') {
-  } elsif  ($service_state in ['installed_and_configured','uninstalled']) {
-      if ($package_type == 'hdp') {
-        $cmd = "ln -s /usr/libexec/zkEnv.sh /usr/bin/zkEnv.sh"
-        $test = "test -e /usr/bin/zkEnv.sh"
-        hdp::exec { $cmd :
-           command => $cmd,
-           unless  => $test,
-           require => Class['hdp-zookeeper']
-        }
-      } 
-      if ($hdp::params::service_exists['hdp-zookeeper'] != true) {
-        class { 'hdp-zookeeper' : 
-         type => 'client',
-         service_state => $service_state
-        } 
-      }
-    } else {
-   hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/init.pp
deleted file mode 100644
index 6540f96..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/init.pp
+++ /dev/null
@@ -1,142 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-zookeeper(
-  $type = server,
-  $service_state = $hdp::params::cluster_service_state,
-  $myid = 1,
-  $opts = {}
-) inherits hdp-zookeeper::params 
-{
-
- if ($service_state == 'no_op') {
-   if ($type == 'server') {
-     $hdp::params::service_exists['hdp-zookeeper'] = true
-  }
- } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-   $zk_user = $hdp-zookeeper::params::zk_user
-   $zk_config_dir = $hdp-zookeeper::params::conf_dir
- 
-   anchor{'hdp-zookeeper::begin':}
-   anchor{'hdp-zookeeper::end':}
-
-   if ($service_state == 'uninstalled') {
-     if ($type == 'server') {
-       $hdp::params::service_exists['hdp-zookeeper'] = true
-    }
-     hdp::package { 'zookeeper':
-       ensure => 'uninstalled'
-     }
-     hdp::directory_recursive_create { $zk_config_dir:
-       service_state => $service_state,
-       force => true
-     }
-
-     if ($type == 'server') {
-        class { 'hdp-zookeeper::service':
-          ensure => $service_state,
-          myid   => $myid
-        }
-       }
-
-     if ($type == 'server') {
-       Anchor['hdp-zookeeper::begin'] -> Hdp::Package['zookeeper'] -> Hdp::Directory_recursive_create[$zk_config_dir] -> Class['hdp-zookeeper::service']  -> Anchor['hdp-zookeeper::end']
-     } else {
-       Anchor['hdp-zookeeper::begin'] -> Hdp::Package['zookeeper'] -> Hdp::Directory_recursive_create[$zk_config_dir] -> Anchor['hdp-zookeeper::end']
-     }
-   } else {
-     hdp::package { 'zookeeper':}
-
-     hdp::user{ 'zk_user':
-       user_name => $zk_user
-     }
-
-     hdp::directory_recursive_create { $zk_config_dir: 
-      service_state => $service_state,
-      force => true,
-      owner => $zk_user
-     }
-
-     hdp-zookeeper::configfile { ['zoo.cfg','zookeeper-env.sh','configuration.xsl']: }
-
-     if ($service_state == 'installed_and_configured') {
-       hdp-zookeeper::configfile { 'log4j.properties': }
-     }
- 
-     if ($hdp::params::update_zk_shell_files == true) {
-       hdp-zookeeper::shell_file{ ['zkServer.sh','zkEnv.sh']: }
-     }
-
-     if ($type == 'server') {
-       $hdp::params::service_exists['hdp-zookeeper'] = true
-       class { 'hdp-zookeeper::service': 
-         ensure => $service_state,
-         myid   => $myid
-       }
-      }
-
-      if ($security_enabled == true) {
-        if ($type == 'server') {
-          hdp-zookeeper::configfile { 'zookeeper_jaas.conf' : }
-          hdp-zookeeper::configfile { 'zookeeper_client_jaas.conf' : }
-        } else {
-          hdp-zookeeper::configfile { 'zookeeper_client_jaas.conf' : }
-        }
-      }
-
-     file { "${zk_config_dir}/zoo_sample.cfg":
-       owner => $zk_user,
-       group => $hdp::params::user_group
-     }
-
-      Anchor['hdp-zookeeper::begin'] -> Hdp::Package['zookeeper'] -> Hdp::User['zk_user'] -> 
-        Hdp::Directory_recursive_create[$zk_config_dir] -> Hdp-zookeeper::Configfile<||> -> File["${zk_config_dir}/zoo_sample.cfg"] -> Anchor['hdp-zookeeper::end']
-      if ($type == 'server') {
-        Hdp::Directory_recursive_create[$zk_config_dir] -> Hdp-zookeeper::Configfile<||> -> Class['hdp-zookeeper::service'] -> Anchor['hdp-zookeeper::end']
-      }
-      if ($hdp::params::update_zk_shell_files == true) {
-        Hdp::Package['zookeeper'] -> Hdp-zookeeper::Shell_file<||> -> Anchor['hdp-zookeeper::end']
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-### config files
-define hdp-zookeeper::configfile(
-  $mode = undef
-) 
-{
-  hdp::configfile { "${hdp-zookeeper::params::conf_dir}/${name}":
-    component       => 'zookeeper',
-    owner           => $hdp-zookeeper::params::zk_user,
-    mode            => $mode
-  }
-}
-
-### 
-define hdp-zookeeper::shell_file()
-{
-  file { "${hdp::params::zk_bin}/${name}":
-    source => "puppet:///modules/hdp-zookeeper/${name}", 
-    mode => '0755'
-  }
-}

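The configfile define at the bottom is a thin wrapper that renders each named template (zoo.cfg, zookeeper-env.sh, ...) into the conf dir owned by the ZooKeeper user. A minimal stand-in using Python's string.Template; the keys come from the params class in the next file, but the template text itself is illustrative, not the shipped ERB:

import os
import pwd
from string import Template

ZOO_CFG = Template(
    "tickTime=$tickTime\n"
    "initLimit=$initLimit\n"
    "syncLimit=$syncLimit\n"
    "clientPort=$clientPort\n"
    "dataDir=$zk_data_dir\n"
)

def write_config(conf_dir, name, template, params, owner="zookeeper"):
    path = os.path.join(conf_dir, name)
    with open(path, "w") as f:
        f.write(template.substitute(params))
    pw = pwd.getpwnam(owner)
    os.chown(path, pw.pw_uid, pw.pw_gid)  # requires root

write_config("/etc/zookeeper/conf", "zoo.cfg", ZOO_CFG, {
    "tickTime": "2000", "initLimit": "10", "syncLimit": "5",
    "clientPort": "2181", "zk_data_dir": "/var/lib/zookeeper/data",
})
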
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/params.pp
deleted file mode 100644
index 1102bf6..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/params.pp
+++ /dev/null
@@ -1,45 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-zookeeper::params() inherits hdp::params 
-{
-  $conf_dir = $hdp::params::zk_conf_dir
-  $zk_user = $hdp::params::zk_user
-  $hostname = $hdp::params::hostname
-  
-  $zk_log_dir = hdp_default("zk_log_dir","/var/log/zookeeper")
-  $zk_data_dir = hdp_default("zk_data_dir","/var/lib/zookeeper/data")
-  $zk_pid_dir = hdp_default("zk_pid_dir","/var/run/zookeeper")
-  $zk_pid_file = "${zk_pid_dir}/zookeeper_server.pid"
-  $zk_server_heapsize = hdp_default("zk_server_heapsize","-Xmx1024m")
-
-  $tickTime = hdp_default("tickTime","2000")
-  $initLimit = hdp_default("initLimit","10")
-  $syncLimit = hdp_default("syncLimit","5")
-  $clientPort = hdp_default("clientPort","2181")
-
-  $zk_primary_name = hdp_default("zookeeper_primary_name", "zookeeper")
-  $zk_principal_name = hdp_default("zookeeper_principal_name", "zookeeper/_HOST@EXAMPLE.COM")
-  $zk_principal = regsubst($zk_principal_name, '_HOST', $hostname)
-
-  $zk_keytab_path = hdp_default("zookeeper_keytab_path", "${keytab_path}/zk.service.keytab")
-  $zk_server_jaas_file = hdp_default("zk_server_jaas_conf_file", "${conf_dir}/zookeeper_jaas.conf")
-  $zk_client_jaas_file = hdp_default("zk_client_jaas_conf_file", "${conf_dir}/zookeeper_client_jaas.conf")
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/quorum/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/quorum/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/quorum/service_check.pp
deleted file mode 100644
index 159f225..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/quorum/service_check.pp
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-zookeeper::quorum::service_check()
-{
-  include hdp-zookeeper::params
-  $conf_dir = $hdp-zookeeper::params::conf_dir
-  $security_enabled = $hdp::params::security_enabled
-  $smoke_test_user = $hdp::params::smokeuser
-  $kinit_path = $hdp::params::kinit_path_local
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-  $smoke_script = $hdp::params::zk_smoke_test_script
-  $quorum_smoke_shell_files = ['zkSmoke.sh']
-
-  anchor { 'hdp-zookeeper::quorum::service_check::begin':}
-
-  hdp-zookeeper::quorum_smoke_shell_file { $quorum_smoke_shell_files: }
-
-  anchor{ 'hdp-zookeeper::quorum::service_check::end':}
-}
-
-define hdp-zookeeper::quorum_smoke_shell_file()
-{
-  $conf_dir = $hdp-zookeeper::params::conf_dir
-  $smoke_test_user = $hdp::params::smokeuser
-  $smoke_script = $hdp::params::zk_smoke_test_script
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-  $kinit_path = $hdp::params::kinit_path_local
-  $security_enabled =  $hdp::params::security_enabled
-  file { '/tmp/zkSmoke.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-zookeeper/zkSmoke.sh",
-    mode => '0755'
-  }
-
-  exec { '/tmp/zkSmoke.sh':
-    command   => "sh /tmp/zkSmoke.sh ${smoke_script} ${smoke_test_user} ${conf_dir} ${::clientPort} ${security_enabled} ${kinit_path} ${smoke_user_keytab}",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/zkSmoke.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-}
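
With the stock defaults from hdp::params and hdp-zookeeper::params (both
shown later in this message) and security disabled, the exec above reduces
to roughly this sketch (all concrete values are assumed defaults,
illustrative only, not part of the diff):

    exec { '/tmp/zkSmoke.sh':
      command   => 'sh /tmp/zkSmoke.sh /usr/lib/zookeeper/bin/zkCli.sh ambari_qa /etc/zookeeper/conf 2181 false /usr/bin/kinit /etc/security/keytabs/smokeuser.headless.keytab',
      tries     => 3,
      try_sleep => 5
    }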

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/service.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/service.pp b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/service.pp
deleted file mode 100644
index 0456520..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/service.pp
+++ /dev/null
@@ -1,96 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-zookeeper::service(
-  $ensure = $hdp::params::cluster_service_state,
-  $myid
-)
-{
-  include hdp-zookeeper::params
-  $user = $hdp-zookeeper::params::zk_user
-  $conf_dir = $hdp-zookeeper::params::conf_dir
-  $zk_bin = $hdp::params::zk_bin
-  $cmd = "env ZOOCFGDIR=${conf_dir} ZOOCFG=zoo.cfg ${zk_bin}/zkServer.sh"
-
-  $pid_file = $hdp-zookeeper::params::zk_pid_file  
-
-  if ($ensure == 'running') {
-    $daemon_cmd = "su - ${user} -c  'source ${conf_dir}/zookeeper-env.sh ; ${cmd} start'"
-    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-    #not using $no_op_test = "su - ${user} -c  '${cmd} status'" because it checks more than whether the service is started
-  } elsif ($ensure == 'stopped') {
-    $daemon_cmd = "su - ${user} -c  'source ${conf_dir}/zookeeper-env.sh ; ${cmd} stop' && rm -f ${pid_file}"
-    #TODO: put in no_op_test for stopped
-    $no_op_test = undef
-  } else {
-    $daemon_cmd = undef
-  }
-  hdp::directory_recursive_create { $hdp-zookeeper::params::zk_pid_dir: 
-    owner        => $user,
-    context_tag => 'zk_service',
-    service_state => $ensure,
-    force => true
-  }
-  hdp::directory_recursive_create { $hdp-zookeeper::params::zk_log_dir: 
-    owner        => $user,
-    context_tag => 'zk_service',
-    service_state => $ensure,
-    force => true
-  }
-  hdp::directory_recursive_create { $hdp-zookeeper::params::zk_data_dir:
-    owner        => $user,
-    context_tag => 'zk_service',
-    service_state => $ensure,
-    force => true
-  }
-  
-  if ($daemon_cmd != undef) {
-    hdp::exec { $daemon_cmd:
-      command => $daemon_cmd,
-      unless  => $no_op_test,
-      initial_wait => $initial_wait
-    }
-  }
-
-  if ($ensure == 'uninstalled') {
-    anchor{'hdp-zookeeper::service::begin':} -> Hdp::Directory_recursive_create<|context_tag == 'zk_service'|> ->  anchor{'hdp-zookeeper::service::end':}
-  } else {
-    class { 'hdp-zookeeper::set_myid': myid => $myid}
-
-    anchor{'hdp-zookeeper::service::begin':} -> Hdp::Directory_recursive_create<|context_tag == 'zk_service'|> -> 
-    Class['hdp-zookeeper::set_myid'] -> anchor{'hdp-zookeeper::service::end':}
-
-    if ($daemon_cmd != undef) {
-      Class['hdp-zookeeper::set_myid'] -> Hdp::Exec[$daemon_cmd] -> Anchor['hdp-zookeeper::service::end']
-    }
-  }
-}
-
-class hdp-zookeeper::set_myid($myid)
-{
-  file {"${hdp-zookeeper::params::zk_data_dir}/myid":
-    ensure  => file,
-    content => $myid,
-    mode    => '0644',
-  }
-}
-
-
-
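
For a ZooKeeper server assigned id 2, the set_myid class above amounts to
managing a one-line id file; a minimal sketch, assuming the default
zk_data_dir from hdp-zookeeper::params (shown later in this message):

    class { 'hdp-zookeeper::set_myid':
      myid => '2'
    }
    # manages /var/lib/zookeeper/data/myid with content "2"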

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/zookeeper/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/zookeeper/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/zookeeper/service_check.pp
deleted file mode 100644
index 283c6ab..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/zookeeper/service_check.pp
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-zookeeper::zookeeper::service_check()
-{
-  include hdp-zookeeper::params
-  $conf_dir = $hdp-zookeeper::params::conf_dir
-  $smoke_script = $hdp::params::zk_smoke_test_script
-  $security_enabled = $hdp::params::security_enabled
-  $smoke_test_user = $hdp::params::smokeuser
-  $zookeeper_smoke_shell_files = ['zkService.sh']
-  $kinit_path = $hdp::params::kinit_path_local
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-  anchor { 'hdp-zookeeper::zookeeper::service_check::begin':}
-
-  hdp-zookeeper::zookeeper_smoke_shell_file { $zookeeper_smoke_shell_files: }
-
-  anchor{ 'hdp-zookeeper::zookeeper::service_check::end':}
-}
-
-define hdp-zookeeper::zookeeper_smoke_shell_file()
-{
-  file { '/tmp/zkService.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-zookeeper/zkService.sh",
-    mode => '0755'
-  }
-
-  exec { '/tmp/zkService.sh':
-    command   => "sh /tmp/zkService.sh ${smoke_script} ${smoke_test_user} ${conf_dir} ${clientPort} ${security_enabled} ${kinit_path} ${smoke_user_keytab}",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/zkService.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/configuration.xsl.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/configuration.xsl.erb b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/configuration.xsl.erb
deleted file mode 100644
index c003ba2..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/configuration.xsl.erb
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<html>
-<body>
-<table border="1">
-<tr>
- <td>name</td>
- <td>value</td>
- <td>description</td>
-</tr>
-<xsl:for-each select="property">
-  <tr>
-     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
-     <td><xsl:value-of select="value"/></td>
-     <td><xsl:value-of select="description"/></td>
-  </tr>
-</xsl:for-each>
-</table>
-</body>
-</html>
-</xsl:template>
-</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/log4j.properties.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/log4j.properties.erb b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/log4j.properties.erb
deleted file mode 100644
index db69564..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/log4j.properties.erb
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-#
-# ZooKeeper Logging Configuration
-#
-
-# Format is "<default threshold> (, <appender>)+"
-
-# DEFAULT: console appender only
-log4j.rootLogger=INFO, CONSOLE
-
-# Example with rolling log file
-#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
-
-# Example with rolling log file and tracing
-#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
-
-#
-# Log INFO level and above messages to the console
-#
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.Threshold=INFO
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-#
-# Add ROLLINGFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.ROLLINGFILE.Threshold=DEBUG
-log4j.appender.ROLLINGFILE.File=zookeeper.log
-
-# Max log file size of 10MB
-log4j.appender.ROLLINGFILE.MaxFileSize=10MB
-# uncomment the next line to limit number of backup files
-#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
-
-log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-
-#
-# Add TRACEFILE to rootLogger to get log file output
-#    Log TRACE level and above messages to a log file
-log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
-log4j.appender.TRACEFILE.Threshold=TRACE
-log4j.appender.TRACEFILE.File=zookeeper_trace.log
-
-log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
-### Notice we are including log4j's NDC here (%x)
-log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zoo.cfg.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zoo.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zoo.cfg.erb
deleted file mode 100644
index e3edc7c..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zoo.cfg.erb
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The number of milliseconds of each tick
-tickTime=<%=scope.function_hdp_template_var("tickTime")%>
-# The number of ticks that the initial
-# synchronization phase can take
-initLimit=<%=scope.function_hdp_template_var("initLimit")%>
-# The number of ticks that can pass between
-# sending a request and getting an acknowledgement
-syncLimit=<%=scope.function_hdp_template_var("syncLimit")%>
-# the directory where the snapshot is stored.
-dataDir=<%=scope.function_hdp_template_var("zk_data_dir")%>
-# the port at which the clients will connect
-clientPort=<%=scope.function_hdp_template_var("clientPort")%>
-<%(scope.function_hdp_host("zookeeper_hosts")||[]).each_with_index do |host,i|-%>
-server.<%=(i+1).to_s%>=<%=host%>:2888:3888
-<% end -%>
-
-<% if scope.function_hdp_template_var("::hdp::params::security_enabled") == true %>
-authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
-jaasLoginRenew=3600000
-kerberos.removeHostFromPrincipal=true
-kerberos.removeRealmFromPrincipal=true
-<% end %>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper-env.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper-env.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper-env.sh.erb
deleted file mode 100644
index f0c18f7..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper-env.sh.erb
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-export JAVA_HOME=<%=scope.function_hdp_template_var("::hdp::params::java64_home")%>
-export ZOO_LOG_DIR=<%=scope.function_hdp_template_var("zk_log_dir")%>
-export ZOOPIDFILE=<%=scope.function_hdp_template_var("zk_pid_file")%>
-export SERVER_JVMFLAGS=<%=scope.function_hdp_template_var("zk_server_heapsize")%>
-export JAVA=$JAVA_HOME/bin/java
-export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
-
-<% if scope.function_hdp_template_var("::hdp::params::security_enabled") == true %>
-export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config=<%=scope.function_hdp_template_var("::hdp-zookeeper::params::zk_server_jaas_file")%>"
-export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config=<%=scope.function_hdp_template_var("::hdp-zookeeper::params::zk_client_jaas_file")%>"
-<% end %>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper_client_jaas.conf.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper_client_jaas.conf.erb b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper_client_jaas.conf.erb
deleted file mode 100644
index 696718e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper_client_jaas.conf.erb
+++ /dev/null
@@ -1,5 +0,0 @@
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};


[03/15] AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
deleted file mode 100644
index 6e7f7fb..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
+++ /dev/null
@@ -1,794 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp::params()
-{
-
-  ##Constants##
-  $NOTHING='NOTHING'
-  $NOBODY_USER='nobody'
-
-  ###### environment variables
-  if (hdp_is_empty($configuration) == false) {
-    $core-site = $configuration['core-site']
-    $hbase-site = $configuration['hbase-site']
-    $hdfs-site = $configuration['hdfs-site']
-    $hive-site = $configuration['hive-site']
-    $hue-site = $configuration['hue-site']
-    $mapred-site = $configuration['mapred-site']
-    $oozie-site = $configuration['oozie-site']
-    $sqoop-site = $configuration['sqoop-site']
-    $webhcat-site = $configuration['webhcat-site']
-    $yarn-site = $configuration['yarn-site']
-  }
-
-  ## Stack version
-  $stack_version = hdp_default("stack_version", "1.3.0")
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-    $isHadoop2Stack = true
-  } else {
-    $isHadoop2Stack = false
-  }
-
-  ##### global state defaults ####
-  $cluster_service_state = hdp_default("cluster_service_state","running")
-  $cluster_client_state = hdp_default("cluster_client_state","installed_and_configured")
-
-  ## Hostname defaults
-  $hostname = hdp_to_lowercase(hdp_default("myhostname", $::fqdn))
-  $public_hostname = hdp_default("public_hostname")
-
-  ##### for secure install
-  $hadoop_security_authentication = hdp_default("core-site/hadoop.security.authentication", "simple")
-
-  $security_enabled = $hadoop_security_authentication ? {
-    'kerberos' => true,
-    default => false,
-  }
-
-  $hadoop_ssl_enabled = hdp_default("core-site/hadoop.ssl.enabled", false)
-
-  $kerberos_domain = hdp_default("kerberos_domain","EXAMPLE.COM")
-  $kinit_path_local = hdp_get_kinit_path(hdp_default("kinit_path_local"), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin")
-  $keytab_path = hdp_default("keytab_path", "/etc/security/keytabs")
-  $use_hostname_in_principal = hdp_default("instance_name", true)
-  $smokeuser_keytab = hdp_default("smokeuser_keytab", "/etc/security/keytabs/smokeuser.headless.keytab")
-  $hdfs_user_keytab = hdp_default("hdfs_user_keytab", "/etc/security/keytabs/hdfs.headless.keytab")
-  $hbase_user_keytab = hdp_default("hbase_user_keytab", "/etc/security/keytabs/hbase.headless.keytab")
-  $nagios_keytab_path = hdp_default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
-  $nagios_principal_name = hdp_default("nagios_principal_name", "nagios")
-
-  ###### hostnames
-  $namenode_host = hdp_default("namenode_host")
-  $snamenode_host = hdp_default("snamenode_host")
-  $jtnode_host = hdp_default("jtnode_host")
-  $slave_hosts = hdp_default("slave_hosts")
-  $journalnode_hosts = hdp_default("journalnode_hosts")
-  $zkfc_hosts = hdp_default("zkfc_hosts")
-  $rm_host = hdp_default("rm_host")
-  $nm_hosts = hdp_default("nm_hosts")
-  $hs_host = hdp_default("hs_host")
-  $zookeeper_hosts = hdp_default("zookeeper_hosts")
-  $flume_hosts = hdp_default("flume_hosts")
-
-
-  $nn_principal_str = hdp_default("hdfs-site/dfs.namenode.kerberos.principal", "nn/_HOST@EXAMPLE.COM")
-  if ("_HOST" in $nn_principal_str and hdp_is_empty($namenode_host) == false) {
-    $nn_principal = regsubst($nn_principal_str, "_HOST", hdp_to_lowercase(hdp_first_value_from_list($namenode_host)))
-  } else {
-    $nn_principal = $nn_principal_str
-  }
-  $jt_principal_str = hdp_default("mapred-site/mapreduce.jobtracker.kerberos.principal", "jt/_HOST@EXAMPLE.COM")
-  if ("_HOST" in $jt_principal_str and hdp_is_empty($jtnode_host) == false) {
-    $jt_principal = regsubst($jt_principal_str, "_HOST", hdp_to_lowercase(hdp_first_value_from_list($jtnode_host)))
-  } else {
-    $jt_principal = $jt_principal_str
-  }
-
-  $flume_port = hdp_default("flume_port", "4159")
-
-  $hbase_master_hosts = hdp_default("hbase_master_hosts", "")
-
-  #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
-  $hbase_rs_hosts = hdp_default("hbase_rs_hosts", $slave_hosts)
-
-  #if mapred_tt_hosts not given it is assumed that tasktracker servers on same nodes as slaves
-  $mapred_tt_hosts = hdp_default("mapred_tt_hosts", $slave_hosts)
-
-  $all_hosts = hdp_default("all_hosts")
-
-  $hive_server_host = hdp_default("hive_server_host", "")
-  $oozie_server =  hdp_default("oozie_server", "")
-  $webhcat_server_host = hdp_default("webhcat_server_host", "")
-  $gateway_host = hdp_default("gateway_host")
-  $hue_server_host = hdp_default("hue_server_host", "")
-  
-  $nagios_server_host = hdp_default("nagios_server_host")
-  $ganglia_server_host = hdp_default("ganglia_server_host")
-  
-  $dashboard_host = hdp_default("dashboard_host")
-
-  $hdp_os = $::operatingsystem
-  $hdp_os_version = $::operatingsystemrelease
-
-  
-  case $::operatingsystem {
-    centos: {
-      case $::operatingsystemrelease {
-        /^5\..+$/: { $hdp_os_type = "centos5" }
-        /^6\..+$/: { $hdp_os_type = "centos6" }
-      }
-    }
-    redhat: {
-      case $::operatingsystemrelease {
-        /^5\..+$/: { $hdp_os_type = "redhat5" }
-        /^6\..+$/: { $hdp_os_type = "redhat6" }
-      }
-    }
-    oraclelinux: {
-      case $::operatingsystemrelease {
-        /^5\..+$/: { $hdp_os_type = "oraclelinux5" }
-        /^6\..+$/: { $hdp_os_type = "oraclelinux6" }
-      }
-    }
-    suse: {
-      $hdp_os_type = "suse"
-    }
-    SLES: {
-      $hdp_os_type = "suse"
-    }
-
-    default: {
-      hdp_fail("No support for os $::operatingsystem  ${hdp_os} ${hdp_os_version}")
-    }
-  }
-
-  if ($hostAttributes != undef) {
-    $public_namenode_host = hdp_host_attribute($hostAttributes,"publicfqdn",$namenode_host)
-    $public_snamenode_host = hdp_host_attribute($hostAttributes,"publicfqdn",$snamenode_host)
-    $public_rm_host = hdp_host_attribute($hostAttributes,"publicfqdn",$rm_host)
-    $public_nm_hosts = hdp_host_attribute($hostAttributes,"publicfqdn",$nm_hosts)
-    $public_hs_host = hdp_host_attribute($hostAttributes,"publicfqdn",$hs_host)
-    $public_journalnode_hosts = hdp_host_attribute($hostAttributes,"publicfqdn",$journalnode_hosts)
-    $public_zkfc_hosts = hdp_host_attribute($hostAttributes,"publicfqdn",$zkfc_hosts)
-    $public_jtnode_host = hdp_host_attribute($hostAttributes,"publicfqdn",$jtnode_host)
-    $public_hbase_master_hosts = hdp_host_attribute($hostAttributes,"publicfqdn",$hbase_master_hosts)
-    $public_zookeeper_hosts = hdp_host_attribute($hostAttributes,"publicfqdn",$zookeeper_hosts)
-    $public_ganglia_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$ganglia_server_host)
-    $public_nagios_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$nagios_server_host)
-    $public_dashboard_host = hdp_host_attribute($hostAttributes,"publicfqdn",$dashboard_host)
-    $public_hive_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$hive_server_host)
-    $public_oozie_server = hdp_host_attribute($hostAttributes,"publicfqdn",$oozie_server)
-    $public_webhcat_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$webhcat_server_host)
-  } else {
-    $public_namenode_host = hdp_default("namenode_host")
-    $public_snamenode_host = hdp_default("snamenode_host")
-    $public_rm_host = hdp_default("rm_host")
-    $public_nm_hosts = hdp_default("nm_hosts")
-    $public_hs_host = hdp_default("hs_host")
-    $public_journalnode_hosts = hdp_default("journalnode_hosts")
-    $public_zkfc_hosts = hdp_default("zkfc_hosts")
-    $public_jtnode_host = hdp_default("jtnode_host")
-    $public_hbase_master_hosts = hdp_default("hbase_master_hosts")
-    $public_zookeeper_hosts = hdp_default("zookeeper_hosts")
-    $public_ganglia_server_host = hdp_default("ganglia_server_host")
-    $public_nagios_server_host = hdp_default("nagios_server_host")
-    $public_dashboard_host = hdp_default("dashboard_host")
-    $public_hive_server_host = hdp_default("hive_server_host")
-    $public_oozie_server = hdp_default("oozie_server")
-    $public_webhcat_server_host = hdp_default("webhcat_server_host")
-  }
-
-
-  ############ users
-  $user_info = hdp_default("user_info",{})
-  $defined_groups = {}
-
-  $nagios_default_user = "nagios"
-  $nagios_default_group = "nagios"
-  $nagios_user = hdp_default("nagios_user", $nagios_default_user)
-  $nagios_group = hdp_default("nagios_group",$nagios_default_group)
-
-  $hdfs_user = hdp_default("hdfs_user","hdfs")
-  $mapred_user = hdp_default("mapred_user","mapred")
-  $yarn_user = hdp_default("yarn_user","yarn")
-
-  $zk_user = hdp_default("zk_user","zookeeper") 
-  $hbase_user = hdp_default("hbase_user","hbase")
-
-  $hive_user = hdp_default("hive_user","hive")
-  $hcat_user = hdp_default("hcat_user","hcat")
-  $webhcat_user = hdp_default("webhcat_user","hcat")
-
-  $oozie_user = hdp_default("oozie_user","oozie")
-  $templeton_user = hdp_default("templeton_user","hcat")
-
-  $gmetad_user = hdp_default("gmetad_user","nobody")
-  $gmond_user = hdp_default("gmond_user","nobody")
-
-  $smokeuser = hdp_default("smokeuser","ambari_qa")
-  $smoke_user_group = hdp_default("smoke_user_group","users")
-
-  $sqoop_user = hdp_default("sqoop_user","sqoop")
-  
-  ############ Hdfs users directories
-  $oozie_hdfs_user_dir = hdp_default("oozie_hdfs_user_dir", "/user/${oozie_user}")
-  $oozie_hdfs_user_mode = 775
-  $hcat_hdfs_user_dir = hdp_default("hcat_hdfs_user_dir", "/user/${hcat_user}")
-  $hcat_hdfs_user_mode = 755
-  $webhcat_hdfs_user_dir = hdp_default("webhcat_hdfs_user_dir", "/user/${webhcat_user}")
-  $webhcat_hdfs_user_mode = 755
-  $hive_hdfs_user_dir = hdp_default("hive_hdfs_user_dir", "/user/${hive_user}")
-  $hive_hdfs_user_mode = 700
-  $smoke_hdfs_user_dir = hdp_default("smoke_hdfs_user_dir", "/user/${smokeuser}")
-  $smoke_hdfs_user_mode = 770
-  
-  ############ Hdfs apps directories
-  $hive_apps_whs_dir = hdp_default("hive-site/hive.metastore.warehouse.dir", "/apps/hive/warehouse")
-  $hbase_hdfs_root_dir = hdp_get_dir_from_url(hdp_default("hbase-site/hbase.rootdir"),"/apps/hbase/data")
-  $hbase_staging_dir = hdp_default("hbase-site/hbase.bulkload.staging.dir","/apps/hbase/staging")
-
-  $yarn_nm_app_log_dir = hdp_default("yarn-site/yarn.nodemanager.remote-app-log-dir","/app-logs")
-
-  $yarn_log_aggregation_enabled = hdp_default("yarn-site/yarn.log-aggregation-enable","true")
-
-  $mapreduce_jobhistory_intermediate_done_dir = hdp_default("mapred-site/mapreduce.jobhistory.intermediate-done-dir","/mr-history/tmp")
-  
-  $mapreduce_jobhistory_done_dir = hdp_default("mapred-site/mapreduce.jobhistory.done-dir","/mr-history/done")
-  
-  $user_group = hdp_default("user_group","hadoop")
-
-  $ganglia_enabled = hdp_default("ganglia_enabled",true) 
-
-  #TODO: either remove or make conditional on ec2
-  $host_address = undef 
-
-  ##### java 
-  $java64_home = hdp_default("java_home")
-  
-  $wipeoff_data =  hdp_default("wipeoff_data",false) 
-
-  $jdk_location = hdp_default("jdk_location","http://download.oracle.com/otn-pub/java/jdk/6u31-b03")
-  $jdk_name = hdp_default("jdk_name","")
-
-  $jce_policy_zip = hdp_default("jce_name","")
-  $jce_location = hdp_default("jce_location","http://download.oracle.com/otn-pub/java/jce_policy/6")
-  $server_db_name = hdp_default("db_name", "postgres")
-  $oracle_jdbc_url = hdp_default("oracle_jdbc_url", "")
-  $mysql_jdbc_url = hdp_default("mysql_jdbc_url", "")
-  $db_driver_file = hdp_default("db_driver_filename", "")
-  $check_db_connection_jar_name = "DBConnectionVerification.jar"
-  $check_db_connection_jar = "/usr/lib/ambari-agent/${check_db_connection_jar_name}"
-  $oozie_jdbc_driver = hdp_default("oozie-site/oozie.service.JPAService.jdbc.driver", "")
-
-  #####
-  $hadoop_home = hdp_default("hadoop_home","/usr")
-  $hadoop_lib_home = hdp_default("hadoop_lib_home","/usr/lib/hadoop/lib")
-
-  #####compression related
-
-  $lzo_enabled = hdp_default("lzo_enabled",false)
-  $snappy_enabled = hdp_default("snappy_enabled",true)
-  
-  $lzo_compression_so_dirs = {
-    32 => "${hadoop_lib_home}/native/Linux-i386-32/",
-    64 => "${hadoop_lib_home}/native/Linux-amd64-64/"
-  }
-  
-  $snappy_so_src_dir = {
-    32 => "${hadoop_home}/lib",
-    64 => "${hadoop_home}/lib64"
-  }
-  $snappy_compression_so_dirs = {
-    32 => "${hadoop_lib_home}/native/Linux-i386-32/",
-    64 => "${hadoop_lib_home}/native/Linux-amd64-64/"
-  }
-
-  $lzo_tar_name = hdp_default("lzo_tar_name","hadoop-lzo-0.5.0")
-  
-  $snappy_so = hdp_default("snappy_so","libsnappy.so")
-  #####
- 
-  $exec_path = ["/bin","/usr/bin", "/usr/sbin"]
-
-  #### params used on multiple modules
-  if (hdp_get_major_stack_version($stack_version) >= 2) {
-    $dfs_data_dir = hdp_default("hdfs-site/dfs.datanode.data.dir","/tmp/hadoop-hdfs/dfs/data")
-  } else {
-    $dfs_data_dir = hdp_default("hdfs-site/dfs.data.dir","/tmp/hadoop-hdfs/dfs/data")
-  }
-
-  ### artifact dir
-  $artifact_dir = hdp_default("artifact_dir","/tmp/HDP-artifacts/")
-
-  ### artifacts download url ##
-  $apache_artifacts_download_url = hdp_default("apache_artifacts_download_url","")
-  $gpl_artifacts_download_url = hdp_default("gpl_artifacts_download_url","")
-
-  # hdfs ha settings
-  $dfs_ha_nameservices = hdp_default("hdfs-site/dfs.nameservices")
-  $dfs_ha_namenode_ids = hdp_default("hdfs-site/dfs.ha.namenodes.${dfs_ha_nameservices}")
-  if (hdp_is_empty($dfs_ha_namenode_ids) == false) {
-    $dfs_ha_namenode_ids_array_len = inline_template("<%=(dfs_ha_namenode_ids).split(',').size()%>")
-    if ($dfs_ha_namenode_ids_array_len > 1) {
-      $dfs_ha_enabled = true
-    } else {
-      $dfs_ha_enabled = false
-    }
-  } else {
-    $dfs_ha_enabled = false
-  }
-
-  # Directory for limits configurations
-  $limits_conf_dir = "/etc/security/limits.d"
-
-  $packages = 'bigtop' 
-  if ($packages == 'hdp') {
-    $mapred_smoke_test_script = "/usr/sbin/hadoop-validate-setup.sh"
-    $hadoop_bin = "/usr/sbin"
-    $hadoop_conf_dir = "/etc/hadoop"
-    $zk_conf_dir = "/etc/zookeeper"
-    $hbase_conf_dir = "/etc/hbase"
-    $sqoop_conf_dir = "/etc/sqoop"
-    $pig_conf_dir = "/etc/pig"
-    $oozie_conf_dir = "/etc/oozie"
-    $hadoop_jar_location = "/usr/share/hadoop"
-    $hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
-    $hbase_daemon_script = "/usr/bin/hbase-daemon.sh"
-    $use_32_bits_on_slaves = false
-    $zk_bin = '/usr/sbin'
-    $zk_smoke_test_script = '/usr/bin/zkCli.sh'
-    $update_zk_shell_files = false
-
-    $hcat_server_host = hdp_default("hcat_server_host")
-    $hcat_mysql_host = hdp_default("hcat_mysql_host")
-    $hue_conf_dir = "/etc/hue/conf"
-    $hive_conf_dir = "/etc/hive/conf"
-
-  } elsif ($packages == 'bigtop') {  
-   
-    $mapred_smoke_test_script = "/usr/lib/hadoop/sbin/hadoop-validate-setup.sh"
-
-    if (hdp_get_major_stack_version($stack_version) >= 2) {
-      $hadoop_bin = "/usr/lib/hadoop/sbin"
-      $hadoop_deps = ['hadoop','hadoop-libhdfs','hadoop-lzo', 'hadoop-lzo-native']
-    } else {
-      $hadoop_bin = "/usr/lib/hadoop/bin"
-      $hadoop_deps = ['hadoop','hadoop-libhdfs','hadoop-native','hadoop-pipes','hadoop-sbin','hadoop-lzo', 'hadoop-lzo-native']
-    }
-    $yarn_bin = "/usr/lib/hadoop-yarn/sbin"
-    $yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
-    $mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
-    $hadoop_conf_dir = "/etc/hadoop/conf"
-    $yarn_conf_dir = "/etc/hadoop/conf"
-    $zk_conf_dir = "/etc/zookeeper/conf"
-    $hbase_conf_dir = "/etc/hbase/conf"
-    $sqoop_conf_dir = "/usr/lib/sqoop/conf"
-    $pig_conf_dir = "/etc/pig/conf"
-    $oozie_conf_dir = "/etc/oozie/conf"
-    $hive_conf_dir = "/etc/hive/conf"
-    $hcat_conf_dir = "/etc/hcatalog/conf"
-    $hadoop_jar_location = "/usr/lib/hadoop/"
-    $hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
-    $hbase_daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-    $use_32_bits_on_slaves = false
-    $zk_bin = '/usr/lib/zookeeper/bin'
-    $zk_smoke_test_script = "/usr/lib/zookeeper/bin/zkCli.sh"
-    $update_zk_shell_files = false
-
-    $hive_mysql_host = hdp_default("hive_mysql_host","localhost")
-
-    $hcat_server_host = hdp_default("hive_server_host")
-    $hcat_mysql_host = hdp_default("hive_mysql_host")
-    $hue_conf_dir = "/etc/hue/conf"
-
-
-    $pathes = {
-      nagios_p1_pl => {
-      'ALL' => '/usr/bin/p1.pl',
-      suse => '/usr/lib/nagios/p1.pl'
-      },
-      httpd_conf_dir => {
-      'ALL' => '/etc/httpd/conf',
-      suse => '/etc/apache2'
-      },
-    }
-
-    $services_names = {
-      mysql => {
-        'ALL' => 'mysqld',
-        suse => 'mysql'},
-      httpd => {  
-      'ALL' => 'httpd',
-      suse => 'apache2'}
-    }
-
-    $cmds = {
-    htpasswd => {
-    'ALL' => 'htpasswd',
-     suse => 'htpasswd2'} 
-
-    }
-
-    # StackId => Arch => Os
-    $package_names = 
-    {
-      snmp => {
-        'ALL' => {
-          64 => {
-            suse =>['net-snmp'],
-            'ALL' => ['net-snmp', 'net-snmp-utils']
-          }
-        }
-      },
-
-      oozie-server => {
-        'ALL' => {
-          64 => {
-            'ALL' => 'oozie.noarch'
-          }
-        }
-      },
-
-      snappy => {
-        'ALL' => {
-          64 => {
-            'ALL' => ['snappy','snappy-devel']
-          }
-        }
-      },
-
-      hadoop => {
-        'ALL' => {
-          32 => {
-            'ALL' => ['hadoop','hadoop-libhdfs.i386','hadoop-native.i386','hadoop-pipes.i386','hadoop-sbin.i386','hadoop-lzo', 'hadoop-lzo-native.i386']
-          },
-          64 => {
-            'ALL' => $hadoop_deps
-          }
-        }
-      },
-
-    hadoop-mapreduce-client => {
-      'ALL' => {
-        64 => {
-          'ALL' => ['hadoop-mapreduce']
-        }
-      }
-    },
-
-    yarn-common => { 
-      'ALL' => {
-        64 => {
-          'ALL' => ['hadoop-yarn']
-        }
-      }
-    },
-
-    yarn-nodemanager => { 
-      'ALL' => {
-        64 => {
-          'ALL' => ['hadoop-yarn-nodemanager', 'hadoop-mapreduce']
-        }
-      }
-    },
-
-    yarn-proxyserver => { 
-      'ALL' => {
-        64 => {
-          'ALL' => ['hadoop-yarn-proxyserver']
-        }
-      }
-    },
-
-    yarn-resourcemanager => { 
-      'ALL' => {
-        64 => {
-          'ALL' => ['hadoop-yarn-resourcemanager', 'hadoop-mapreduce']
-        }
-      }
-    },
-
-    mapreduce-historyserver => { 
-      'ALL' => {
-        64 => {
-          'ALL' => ['hadoop-mapreduce-historyserver']
-        }
-      }
-    },
-
-    tez_client => { 
-      'ALL' => {
-        64 => {
-          'ALL' => ['tez']
-        }
-      }
-    },
-
-    lzo => {
-      'ALL' => {
-        'ALL' => {
-          'ALL' => ['lzo', 'lzo-devel'],
-          suse => ['lzo-devel']
-        }
-      }
-    },
-
-    glibc=> {
-      'ALL' => {
-        'ALL' => {
-          'ALL' => ['glibc','glibc.i686'],
-          suse => ['glibc']
-        }
-      }
-    },
-
-    zookeeper=> {
-      'ALL' => {64 => {'ALL' => 'zookeeper'}}
-    },
-
-    hbase=> {
-      'ALL' => {64 => {'ALL' => 'hbase'}}
-    },
-
-    pig=> { 
-      'ALL' => {'ALL' => {'ALL'=>['pig.noarch']}}
-    },
-
-    sqoop=> {
-      'ALL' => {'ALL' =>{'ALL' => ['sqoop']}}
-    },
-
-    mysql-connector-java=> {
-      'ALL' => {'ALL' =>{'ALL' => ['mysql-connector-java']}}
-    },
-    oozie-client=> {
-      'ALL' => {'64' =>{'ALL' => ['oozie-client.noarch']}}
-    },
-    extjs=> {
-      'ALL' => {64 =>{'ALL' => ['extjs-2.2-1']}}
-    },
-    hive=> {
-      'ALL' => {64 =>{'ALL' => ['hive']}}
-    },
-    hcat=> {
-      'ALL' => {'ALL' =>{'ALL' => ['hcatalog']}}
-    },
-
-    mysql => {
-      'ALL' => {
-        64 =>  {
-          'ALL' => ['mysql','mysql-server'],
-          suse => ['mysql-client','mysql']
-        }
-      }
-    },
-
-    webhcat => {
-      'ALL' => {'ALL' => {'ALL' => 'hcatalog'}}
-    },
-
-    webhcat-tar-hive => {
-      'ALL' => {64 => {'ALL' => 'webhcat-tar-hive'}}
-    },
-
-    webhcat-tar-pig => {
-      'ALL' => {64 => {'ALL' =>'webhcat-tar-pig'}}
-    },
-
-    dashboard => {
-      'ALL' => {64 => {'ALL' => 'hdp_mon_dashboard'}}
-    },
-
-    perl =>
-    {
-      'ALL' => {64 => {'ALL' => 'perl'}}
-    },
-
-    perl-Net-SNMP =>
-    {
-      'ALL' => {64 => {'ALL' => 'perl-Net-SNMP'}}
-    },
-        
-    nagios-server => {
-      'ALL' => {
-        64 => {
-          'ALL' => ['nagios-3.5.0-99','nagios-www-3.5.0-99']
-        }
-      }
-    },
-
-    nagios-devel => {
-      'ALL' => {64 => {'ALL' => 'nagios-devel-3.5.0-99'}}
-    },
-
-    nagios-fping => {
-      'ALL' => {64 =>{'ALL' => 'fping'}}
-    },
-
-    nagios-plugins => {
-      'ALL' => {64 => {'ALL' => 'nagios-plugins-1.4.9'}}
-    },
-
-    nagios-addons => {
-      'ALL' => {64 => {'ALL' => 'hdp_mon_nagios_addons'}}
-    },
-
-    nagios-php-pecl-json => {
-      'ALL' => {
-        64 => {
-          'ALL' => $NOTHING,
-          suse => 'php5-json',
-          centos6 => $NOTHING,
-          redhat6 => $NOTHING,
-          oraclelinux6 => $NOTHING,
-          centos5 => 'php-pecl-json.x86_64',
-          redhat5 => 'php-pecl-json.x86_64',
-          oraclelinux5 => 'php-pecl-json.x86_64'
-        }
-      }
-    },
-
-    ganglia-devel => {
-      'ALL' => {64 => {'ALL' => 'ganglia-devel-3.5.0-99'}}
-    },
-
-    libganglia => {
-      'ALL' => {64 => {'ALL' => 'libganglia-3.5.0-99'}}
-    },
-
-    ganglia-server => {
-      'ALL' => {64 => {'ALL' => 'ganglia-gmetad-3.5.0-99'}}
-    },
-
-    ganglia-web => {
-      'ALL' => {64 => {'ALL' => 'ganglia-web-3.5.7-99.noarch'}}
-    },
-
-    ganglia-monitor => {
-      'ALL' => {64 => {'ALL' =>'ganglia-gmond-3.5.0-99'}}
-    },
-
-    ganglia-gmond-modules-python => {
-      'ALL' => {64 => {'ALL' =>'ganglia-gmond-modules-python-3.5.0-99'}}
-    },
-
-    rrdtool-python => {
-      'ALL' => {64 => {'ALL' =>'python-rrdtool.x86_64'}}
-    },
-
-    # The 32bit version of package rrdtool-devel is removed on centos 5/6 to prevent conflict ( BUG-2881)
-    rrdtool-devel => {
-      'ALL' => {
-        64 => {
-          'ALL' => 'rrdtool-devel.i686',
-          'centos6' => 'rrdtool-devel.i686',
-          'centos5' => 'rrdtool-devel.i386',
-          'redhat6' => 'rrdtool-devel.i686',
-          'redhat5' => 'rrdtool-devel.i386',
-          'oraclelinux6' => 'rrdtool-devel.i686',
-          'oraclelinux5' => 'rrdtool-devel.i386'
-        }
-      }
-    },
-
-    # The 32bit version of package rrdtool is removed on centos 5/6 to prevent conflict ( BUG-2408)
-    rrdtool => {
-      'ALL' => {
-        64 => {
-          'ALL' => 'rrdtool.i686',
-          'centos6' => 'rrdtool.i686',
-          'centos5' => 'rrdtool.i386',
-          'redhat6' => 'rrdtool.i686',
-          'redhat5' => 'rrdtool.i386',
-          'oraclelinux6' => 'rrdtool.i686',
-          'oraclelinux5' => 'rrdtool.i386'
-        }
-      }
-    },
-
-    hue-server => {
-      'ALL' => {64 => {'ALL' => 'hue.noarch'}}
-    },
-
-    ambari-log4j => {
-      'ALL' => {
-        64 => {
-          'ALL' => 'ambari-log4j'
-        }
-      }
-    },
-
-    httpd => {
-      'ALL' => {
-        64 => {
-          'ALL' => 'httpd',
-          suse => ['apache2', 'apache2-mod_php5']
-        }
-      }
-    },
-    
-    unzip => {
-      'ALL' => {'ALL' => {'ALL' => 'unzip'}}
-    }
-}
-
-  $repos_paths = 
-  {
-    centos6 => '/etc/yum.repos.d',
-    centos5 => '/etc/yum.repos.d',
-    suse => '/etc/zypp/repos.d',
-    redhat6 => '/etc/yum.repos.d',
-    redhat5 => '/etc/yum.repos.d',
-    oraclelinux6 => '/etc/yum.repos.d',
-    oraclelinux5 => '/etc/yum.repos.d'
-  }
-
-  $rrd_py_path =
-  {
-    suse => '/srv/www/cgi-bin',
-    centos6 => '/var/www/cgi-bin',
-    centos5 => '/var/www/cgi-bin',
-    redhat6 => '/var/www/cgi-bin',
-    redhat5 => '/var/www/cgi-bin',
-    oraclelinux6 => '/var/www/cgi-bin',
-    oraclelinux5 => '/var/www/cgi-bin'
-  }
-  
-  $nagios_lookup_daemon_strs = 
-  {
-    suse => '/usr/sbin/nagios',
-    centos6 => '/usr/sbin/nagios',
-    centos5 => '/usr/sbin/nagios',
-    redhat6 => '/usr/sbin/nagios',
-    redhat5 => '/usr/sbin/nagios',
-    oraclelinux6 => '/usr/sbin/nagios',
-    oraclelinux5 => '/usr/sbin/nagios'
-  }
-
-  }
-
- 
-###### snmp
-
-  $snmp_conf_dir = hdp_default("snmp_conf_dir","/etc/snmp/")
-  $snmp_source = hdp_default("snmp_source","0.0.0.0/0") ##TODO!!! for testing needs to be closed up
-  $snmp_community = hdp_default("snmp_community","hadoop")
-
-###### aux
-  #used by ganglia monitor to tell what components and services are present
-  $component_exists = {} 
-  $service_exists = {}
-
-  $is_namenode_master = $hdp::params::hostname in $namenode_host
-  $is_jtnode_master   = $hdp::params::hostname in $jtnode_host
-  $is_rmnode_master   = $hdp::params::hostname in $rm_host
-  $is_hsnode_master   = $hdp::params::hostname in $hs_host
-  $is_hbase_master    = $hdp::params::hostname in $hbase_master_hosts
-  $is_slave           = $hdp::params::hostname in $slave_hosts
-  
-  $has_namenodes = hdp_is_empty($namenode_host) == false
-  $has_jobtracker = hdp_is_empty($jtnode_host) == false
-  $has_resourcemanager = hdp_is_empty($rm_host) == false
-  $has_historyserver = hdp_is_empty($hs_host) == false
-  $has_hbase_masters = hdp_is_empty($hbase_master_hosts) == false
-  $has_slaves = hdp_is_empty($slave_hosts) == false
-}
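
The $package_names map above is keyed package => stack => arch => os, with
'ALL' serving as the fallback at each level; a hypothetical lookup
(illustrative only -- the actual resolution happened inside the hdp::package
machinery, which is not part of this hunk):

    $pkg_centos5 = $package_names['rrdtool']['ALL'][64]['centos5']  # => 'rrdtool.i386'
    $pkg_generic = $package_names['rrdtool']['ALL'][64]['ALL']      # => 'rrdtool.i686'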

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/manifests/snappy/package.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/snappy/package.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/snappy/package.pp
deleted file mode 100644
index 52a4094..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/snappy/package.pp
+++ /dev/null
@@ -1,52 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp::snappy::package()
-{
- hdp::package {'snappy':
-    package_type  => 'snappy',
-    java_needed   => false
-  }
-  
-  hdp::snappy::package::ln{ 64:} 
-  hdp::snappy::package::ln{ 32:} 
-  
-  anchor{'hdp::snappy::package::begin':} ->  Hdp::Package['snappy'] -> Hdp::Snappy::Package::Ln<||> -> anchor{'hdp::snappy::package::end':}
-}
-
-define hdp::snappy::package::ln()
-{
-  $size = $name
-  $hadoop_home = $hdp::params::hadoop_home  
-  $snappy_so = $hdp::params::snappy_so
-  $so_target_dir = $hdp::params::snappy_compression_so_dirs[$size]
-  $so_target = "${so_target_dir}/libsnappy.so"
-  $so_src_dir = $hdp::params::snappy_so_src_dir[$size]
-  $so_src = "${so_src_dir}/${snappy_so}" 
-  
-  if ($so_target != $so_src) { 
-    $ln_cmd = "mkdir -p $so_target_dir; ln -sf ${so_src} ${so_target}"
-    hdp::exec{ "hdp::snappy::package::ln ${name}":
-      command => $ln_cmd,
-      unless  => "test -f ${so_target}",
-      creates => $so_target
-    }
-  }
-}
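
With the stock hdp::params values shown earlier in this message
(hadoop_home '/usr', hadoop_lib_home '/usr/lib/hadoop/lib'), the 64-bit
instance of the define comes out to approximately the following (paths
assumed from those defaults; illustrative only, not part of the diff):

    hdp::exec { 'hdp::snappy::package::ln 64':
      command => 'mkdir -p /usr/lib/hadoop/lib/native/Linux-amd64-64; ln -sf /usr/lib64/libsnappy.so /usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
      unless  => 'test -f /usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
      creates => '/usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so'
    }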

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/manifests/snmp.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/snmp.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/snmp.pp
deleted file mode 100644
index 00be1b7..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/snmp.pp
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp::snmp(
-  $service_state = 'running'
-)
-{
-  include hdp::params
-
-  hdp::package {'snmp':}
-
-  hdp::snmp-configfile {'snmpd.conf': 
-    notify => Service['snmpd']    
-  }
-
-  service { 'snmpd' :
-    ensure => $service_state
-  }
-
-  exec { "snmpd_autostart" :
-    command => "chkconfig snmpd on",
-    path => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-  }
- 
-  anchor{'hdp::snmp::begin':} -> Hdp::Package['snmp'] -> Hdp::Snmp-configfile<||> -> Service['snmpd'] -> Exec['snmpd_autostart'] -> anchor{'hdp::snmp::end':}
-}
-
-define hdp::snmp-configfile()
-{ 
-  hdp::configfile { "${hdp::params::snmp_conf_dir}/${name}":
-    component     => 'base',
-    owner         => root,
-    group         => root
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/manifests/testing_env_patch.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/testing_env_patch.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/testing_env_patch.pp
deleted file mode 100644
index 70bf722..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/testing_env_patch.pp
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp::testing_env_patch()
-{
-  $cmd = "mkdir /tmp/repos; mv /etc/yum.repos.d/* /tmp/repos"
-  $repo_target = "/etc/yum.repos.d/${hdp::params::hdp_yum_repo}"
-
-  anchor { 'hdp::testing_env_patch::begin' :}
-  exec { '/bin/echo 0 > /selinux/enforce':
-    require => Anchor['hdp::testing_env_patch::begin']
-  }
-  hdp::testing_env_patch::packages { 'common' :
-    require => Exec['/bin/echo 0 > /selinux/enforce']
-  }
-  hdp::exec { $cmd :
-    command => $cmd,
-    unless => "test -e ${repo_target}",
-    require => Hdp::Testing_env_patch::Packages['common']
-  }  
-  anchor { 'hdp::testing_env_patch::end' :
-    require => Exec[$cmd]
-  }
-}
-
-define hdp::testing_env_patch::packages(
-  $needed = false)
-{
- if ($needed == true) {
-   package { ['perl-Digest-HMAC','perl-Socket6','perl-Crypt-DES','xorg-x11-fonts-Type1','libdbi'] :} 
- }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/templates/snmpd.conf.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/templates/snmpd.conf.erb b/ambari-agent/src/main/puppet/modules/hdp/templates/snmpd.conf.erb
deleted file mode 100644
index 8a93b53..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/templates/snmpd.conf.erb
+++ /dev/null
@@ -1,48 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-com2sec notConfigUser  <%=scope.function_hdp_template_var("snmp_source")%>   <%=scope.function_hdp_template_var("snmp_community")%>
-group   notConfigGroup v1           notConfigUser
-group   notConfigGroup v2c           notConfigUser
-view    systemview    included   .1
-access  notConfigGroup ""      any       noauth    exact  systemview none none
-
-syslocation Hadoop 
-syscontact HadoopMaster 
-dontLogTCPWrappersConnects yes
-
-###############################################################################
-# disk checks
-
-disk / 10000
-
-
-###############################################################################
-# load average checks
-#
-
-# load [1MAX=12.0] [5MAX=12.0] [15MAX=12.0]
-#
-# 1MAX:   If the 1 minute load average is above this limit at query
-#         time, the errorFlag will be set.
-# 5MAX:   Similar, but for 5 min average.
-# 15MAX:  Similar, but for 15 min average.
-
-# Check for loads:
-#load 12 14 14
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/puppetApply.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/puppetApply.sh b/ambari-agent/src/main/puppet/modules/puppetApply.sh
deleted file mode 100644
index 559c2e2..0000000
--- a/ambari-agent/src/main/puppet/modules/puppetApply.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-rm -f /var/log/puppet_apply.log
-puppet apply --confdir=/etc/puppet/agent --logdest=/var/log/puppet_apply.log --debug --autoflush --detailed-exitcodes /etc/puppet/agent/modules/catalog/files/site.pp  >> /var/log/puppet_apply.log  2>&1
-ret=$?
-cat /var/log/puppet_apply.log
-if [ "$ret" == "0" ] || [ "$ret" == "2" ]; then
-  exit 0
-else
-  exit 1 
-fi
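
The 0-or-2 test above reflects puppet's --detailed-exitcodes convention:
0 means the run succeeded with no changes, 2 means it succeeded and applied
changes. A minimal Ruby equivalent of the same handling (a sketch, not part
of this patch):

    # Treat exit codes 0 and 2 as success, everything else as failure.
    system('puppet apply --detailed-exitcodes /etc/puppet/agent/modules/catalog/files/site.pp')
    status = $?.exitstatus
    exit([0, 2].include?(status) ? 0 : 1)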

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/CHANGELOG
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/CHANGELOG b/ambari-agent/src/main/puppet/modules/stdlib/CHANGELOG
deleted file mode 100644
index ee6d3b5..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/CHANGELOG
+++ /dev/null
@@ -1,20 +0,0 @@
-2011-06-21 Jeff McCune <je...@puppetlabs.com> - 0.1.7
-* Add validate_hash() and getvar() functions
-
-2011-06-15 Jeff McCune <je...@puppetlabs.com> - 0.1.6
-* Add anchor resource type to provide containment for composite classes
-
-2011-06-03 Jeff McCune <je...@puppetlabs.com> - 0.1.5
-* Add validate_bool() function to stdlib
-
-2011-05-26 Jeff McCune <je...@puppetlabs.com> - 0.1.4
-* Move most stages after main
-
-2011-05-25 Jeff McCune <je...@puppetlabs.com> - 0.1.3
-* Add validate_re() function
-
-2011-05-24 Jeff McCune <je...@puppetlabs.com> - 0.1.2
-* Update to add annotated tag
-
-2011-05-24 Jeff McCune <je...@puppetlabs.com> - 0.1.1
-* Add stdlib::stages class with a standard set of stages

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/LICENSE
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/LICENSE b/ambari-agent/src/main/puppet/modules/stdlib/LICENSE
deleted file mode 100644
index 57bc88a..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/Modulefile
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/Modulefile b/ambari-agent/src/main/puppet/modules/stdlib/Modulefile
deleted file mode 100644
index 4927119..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/Modulefile
+++ /dev/null
@@ -1,11 +0,0 @@
-name    'puppetlabs-stdlib'
-version '0.1.7'
-source 'git://github.com/puppetlabs/puppetlabs-stdlib'
-author 'puppetlabs'
-license 'Apache 2.0'
-summary 'Puppet Module Standard Library'
-description 'Standard Library for Puppet Modules'
-project_page 'https://github.com/puppetlabs/puppetlabs-stdlib'
-
-## Add dependencies, if any:
-# dependency 'username/name', '>= 1.2.0'

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/README.markdown
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/README.markdown b/ambari-agent/src/main/puppet/modules/stdlib/README.markdown
deleted file mode 100644
index 1e93c6f..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/README.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
-# Puppet Labs Standard Library #
-
-This module provides a "standard library" of resources for developing Puppet
-Modules.  This module will include the following additions to Puppet:
-
- * Stages
- * Facts
- * Functions
- * Defined resource types
- * Types
- * Providers
-
-This module is officially curated and provided by Puppet Labs.  The modules
-Puppet Labs writes and distributes will make heavy use of this standard
-library.
-
-# Compatibility #
-
-This module is designed to work with Puppet version 2.6 and later.  It may be
-forked if Puppet 2.7 specific features are added.  There are currently no plans
-for a Puppet 0.25 standard library module.
-
-# Overview #
-
-TBA
-
-# Contact Information #
-
-  Jeff McCune <je...@puppetlabs.com>
-
-# Functions #
-## validate\_hash ##
-
-    $somehash = { 'one' => 'two' }
-    validate_hash($somehash)
-
-## getvar() ##
-
-This function looks up variables in user-defined namespaces within
-Puppet.  Note that if the namespace is a class, it should already be
-evaluated before the function is used.
-
-    $namespace = 'site::data'
-    include "${namespace}"
-    $myvar = getvar("${namespace}::myvar")

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/RELEASE_PROCESS.markdown
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/RELEASE_PROCESS.markdown b/ambari-agent/src/main/puppet/modules/stdlib/RELEASE_PROCESS.markdown
deleted file mode 100644
index df20730..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/RELEASE_PROCESS.markdown
+++ /dev/null
@@ -1,12 +0,0 @@
-# Releasing this module #
-
- * Work in a topic branch
- * Submit a github pull request
- * Address any comments / feedback
- * Merge into master using --no-ff
- * Update the CHANGELOG
- * Create an annotated tag with git tag -a X.Y.Z -m 'version X.Y.Z'
- * Push the tag with git push origin --tags
- * Build a new package with puppet-module
- * Publish the new package to the forge
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/getvar.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/getvar.rb b/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/getvar.rb
deleted file mode 100644
index ffd774d..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/getvar.rb
+++ /dev/null
@@ -1,23 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:getvar, :type => :rvalue, :doc => <<-'ENDHEREDOC') do |args|
-    Lookup a variable in a remote namespace.
-
-    For example:
-
-      $foo = getvar('site::data::foo')
-
-    This is useful if the namespace itself is stored in a string:
-
-      $bar = getvar("${datalocation}::bar")
-    ENDHEREDOC
-
-    unless args.length == 1
-      raise Puppet::ParseError, ("getvar(): wrong number of arguments (#{args.length}; must be 1)")
-    end
-
-    self.lookupvar("#{args[0]}")
-
-  end
-
-end
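
Assuming an evaluated Puppet::Parser::Scope (the specs later in this patch
build one), the Ruby-side call passes its arguments as a single array:

    value = scope.function_getvar(['site::data::foo'])
    # equivalent to the DSL call:  $foo = getvar('site::data::foo')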

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/has_key.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/has_key.rb b/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/has_key.rb
deleted file mode 100644
index 9c1c4c3..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/has_key.rb
+++ /dev/null
@@ -1,27 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:has_key, :type => :rvalue, :doc => <<-'ENDHEREDOC') do |args|
-    Determine if a hash has a certain key.
-
-    Example:
-      $my_hash = {'key_one' => 'value_one'}
-      if has_key($my_hash, 'key_two') {
-        notice('we will not reach here')
-      }
-      if has_key($my_hash, 'key_one') {
-        notice('this will be printed')
-      }
-
-    ENDHEREDOC
-
-    unless args.length == 2
-      raise Puppet::ParseError, ("has_key(): wrong number of arguments (#{args.length}; must be 2)")
-    end
-    unless args[0].is_a?(Hash)
-      raise Puppet::ParseError, "has_key(): expects the first argument to be a hash, got #{args[0].inspect} which is of type #{args[0].class}"
-    end
-    args[0].has_key?(args[1])
-
-  end
-
-end
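
Called the same way from Ruby (a sketch, assuming an evaluated scope as in
the specs later in this patch):

    scope.function_has_key([{'key_one' => 'value_one'}, 'key_one'])  # => true
    scope.function_has_key([{'key_one' => 'value_one'}, 'key_two'])  # => false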

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/loadyaml.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/loadyaml.rb b/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/loadyaml.rb
deleted file mode 100644
index 0f16f69..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/loadyaml.rb
+++ /dev/null
@@ -1,20 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:loadyaml, :type => :rvalue, :doc => <<-'ENDHEREDOC') do |args|
-    Load a YAML file containing an Array, String, or Hash, and return the data
-    as a Puppet variable.
-
-    For example:
-
-      $myhash = loadyaml('/etc/puppet/data/myhash.yaml')
-    ENDHEREDOC
-
-    unless args.length == 1
-      raise Puppet::ParseError, ("loadyaml(): wrong number of arguments (#{args.length}; must be 1)")
-    end
-
-    YAML.load_file(args[0])
-
-  end
-
-end
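
The function is a thin wrapper over the stock YAML loader; a plain-Ruby
sketch of the same call, reusing the path from the docstring:

    require 'yaml'

    data = YAML.load_file('/etc/puppet/data/myhash.yaml')
    # from the DSL:  $myhash = loadyaml('/etc/puppet/data/myhash.yaml')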

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/merge.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/merge.rb b/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/merge.rb
deleted file mode 100644
index 6693884..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/merge.rb
+++ /dev/null
@@ -1,28 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:merge, :type => :rvalue, :doc => <<-'ENDHEREDOC') do |args|
-    Merges two or more hashes together and returns the resulting hash.
-
-    For example:
-
-      $hash1 = {'one' => 1, 'two' => 2}
-      $hash2 = {'two' => 2, 'three' => 2}
-      $merged_hash = merge($hash1, $hash2)
-      # merged_hash =  {'one' => 1, 'two' => 2, 'three' => 2}
-
-    ENDHEREDOC
-
-    if args.length < 2
-      raise Puppet::ParseError, ("merge(): wrong number of arguments (#{args.length}; must be at least 2)")
-    end
-    args.each do |arg|
-      unless arg.is_a?(Hash)
-        raise Puppet::ParseError, "merge: unexpected argument type #{arg.class}, only expects hash arguments"
-      end
-    end
-
-    args.inject({}, :merge)
-
-  end
-
-end
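
The inject({}, :merge) fold gives later arguments precedence, which is what
the merge spec later in this patch asserts:

    [{'one' => 1}, {'one' => '2'}, {'one' => '3'}].inject({}, :merge)
    # => {'one' => '3'}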

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_bool.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_bool.rb b/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_bool.rb
deleted file mode 100644
index 49e6378..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_bool.rb
+++ /dev/null
@@ -1,39 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:validate_bool, :doc => <<-'ENDHEREDOC') do |args|
-    Validate all passed values are true or false.  Abort catalog compilation if the
-    value does not pass the check.
-
-    Example:
-
-    These booleans validate
-
-        $iamtrue = true
-        validate_bool(true)
-        validate_bool(true, true, false, $iamtrue)
-
-    These strings do NOT validate and will abort catalog compilation
-
-        $some_array = [ true ]
-        validate_bool("false")
-        validate_bool("true")
-        validate_bool($some_array)
-
-    * Jeff McCune <je...@puppetlabs.com>
-    * Dan Bode <da...@puppetlabs.com>
-
-    ENDHEREDOC
-
-    unless args.length > 0 then
-      raise Puppet::ParseError, ("validate_bool(): wrong number of arguments (#{args.length}; must be > 0)")
-    end
-
-    args.each do |arg|
-      unless (arg.is_a?(TrueClass) || arg.is_a?(FalseClass))
-        raise Puppet::ParseError, ("#{arg.inspect} is not a boolean.  It looks to be a #{arg.class}")
-      end
-    end
-
-  end
-
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_hash.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_hash.rb b/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_hash.rb
deleted file mode 100644
index 1443318..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_hash.rb
+++ /dev/null
@@ -1,37 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:validate_hash, :doc => <<-'ENDHEREDOC') do |args|
-    Validate that all passed values are Hash data structures.  Abort catalog
-    compilation if any value does not pass the check.
-
-    Example:
-
-    These values validate
-
-        $my_hash = { 'one' => 'two' }
-        validate_hash($my_hash)
-
-    These values do NOT validate
-
-        validate_hash(true)
-        validate_hash('some_string')
-        $undefined = undef
-        validate_hash($undefined)
-
-    * Jeff McCune <je...@puppetlabs.com>
-
-    ENDHEREDOC
-
-    unless args.length > 0 then
-      raise Puppet::ParseError, ("validate_hash(): wrong number of arguments (#{args.length}; must be > 0)")
-    end
-
-    args.each do |arg|
-      unless arg.is_a?(Hash)
-        raise Puppet::ParseError, ("#{arg.inspect} is not a Hash.  It looks to be a #{arg.class}")
-      end
-    end
-
-  end
-
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_re.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_re.rb b/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_re.rb
deleted file mode 100644
index 583f26a..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_re.rb
+++ /dev/null
@@ -1,35 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:validate_re, :doc => <<-'ENDHEREDOC') do |args|
-    Perform simple validation of a string against a regular expression.  The second
-    argument of the function should be a string regular expression (without the //'s)
-    or an array of regular expressions.  If none of the regular expressions in the array
-    match the string passed in, then an exception will be raised.
-
-    Example:
-
-    These strings validate against the regular expressions
-
-        validate_re('one', '^one$')
-        validate_re('one', [ '^one', '^two' ])
-
-    These strings do NOT validate
-
-        validate_re('one', [ '^two', '^three' ])
-
-    Jeff McCune <je...@puppetlabs.com>
-
-    ENDHEREDOC
-    if args.length != 2 then
-      raise Puppet::ParseError, ("validate_re(): wrong number of arguments (#{args.length}; must be 2)")
-    end
-
-    msg = "validate_re(): #{args[0].inspect} does not match #{args[1].inspect}"
-
-    raise Puppet::ParseError, (msg) unless args[1].any? do |re_str|
-      args[0] =~ Regexp.compile(re_str)
-    end
-
-  end
-
-end
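
One caveat: the docstring allows a plain string for the second argument, but
the body calls args[1].any?, which strings only support on Ruby 1.8 (via
Enumerable). A sketch of the documented contract that accepts both forms:

    def matches_any?(value, patterns)
      # Array() wraps a single pattern string, passes an array through.
      Array(patterns).any? { |re_str| value =~ Regexp.compile(re_str) }
    end

    matches_any?('one', '^one$')             # => true
    matches_any?('one', ['^two', '^three'])  # => false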

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/provider/append_line/ruby.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/provider/append_line/ruby.rb b/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/provider/append_line/ruby.rb
deleted file mode 100644
index 5e78659..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/provider/append_line/ruby.rb
+++ /dev/null
@@ -1,15 +0,0 @@
-Puppet::Type.type(:append_line).provide(:ruby) do
-
-  def exists?
-    File.readlines(resource[:path]).find do |line|
-      line.chomp == resource[:line].chomp
-    end
-  end
-
-  def create
-    File.open(resource[:path], 'a') do |fh|
-      fh.puts resource[:line]
-    end
-  end
-
-end
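
A boolean sketch of the provider's exists? check, using the sudoers example
from the type's docstring: each existing line is compared with its trailing
newline stripped.

    File.readlines('/etc/sudoers').any? do |line|
      line.chomp == '%admin ALL=(ALL) ALL'
    end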

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/anchor.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/anchor.rb b/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/anchor.rb
deleted file mode 100644
index 0c28b1c..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/anchor.rb
+++ /dev/null
@@ -1,32 +0,0 @@
-Puppet::Type.newtype(:anchor) do
-  desc <<-'ENDOFDESC'
-  A simple resource type intended to be used as an anchor in a composite class.
-
-      class ntp {
-        class { 'ntp::package': }
-        -> class { 'ntp::config': }
-        -> class { 'ntp::service': }
-
-        # These two resources "anchor" the composed classes
-        # such that the end user may use "require" and "before"
-        # relationships with Class['ntp']
-        anchor { 'ntp::begin': }   -> class  { 'ntp::package': }
-        class  { 'ntp::service': } -> anchor { 'ntp::end': }
-      }
-
-  This resource allows all of the classes in the ntp module to be contained
-  within the ntp class from a dependency management point of view.
-
-  This allows the end user of the ntp module to establish require and before
-  relationships easily:
-
-      class { 'ntp': } -> class { 'mcollective': }
-      class { 'mcollective': } -> class { 'ntp': }
-
-  ENDOFDESC
-
-  newparam :name do
-    desc "The name of the anchor resource."
-  end
-
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/append_line.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/append_line.rb b/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/append_line.rb
deleted file mode 100644
index b3f926c..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/append_line.rb
+++ /dev/null
@@ -1,44 +0,0 @@
-Puppet::Type.newtype(:append_line) do
-
-  desc <<-EOT
-  Type that can append a line to a file if it does not already contain it.
-
-  Example:
-
-  append_line { 'sudo_rule':
-    path => '/etc/sudoers',
-    line => '%admin ALL=(ALL) ALL',
-  }
-
-  EOT
-
-  ensurable do
-    defaultto :present
-    newvalue(:present) do
-      provider.create
-    end
-  end
-
-  newparam(:name, :namevar => true) do
-    desc 'arbitrary name used as identity'
-  end
-
-  newparam(:line) do
-    desc 'The line to be appended to the path.'
-  end
-
-  newparam(:path) do
-    desc 'File to possibly append a line to.'
-    validate do |value|
-      unless (Puppet.features.posix? and value =~ /^\//) or (Puppet.features.microsoft_windows? and (value =~ /^.:\// or value =~ /^\/\/[^\/]+\/[^\/]+/))
-        raise(Puppet::Error, "File paths must be fully qualified, not '#{value}'")
-      end
-    end
-  end
-
-  validate do
-    unless self[:line] and self[:path]
-      raise(Puppet::Error, "Both line and path are required attributes")
-    end
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/manifests/init.pp b/ambari-agent/src/main/puppet/modules/stdlib/manifests/init.pp
deleted file mode 100644
index 1f18d8a..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/manifests/init.pp
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Class: stdlib
-#
-# This module manages stdlib
-#
-# Parameters:
-#
-# Actions:
-#
-# Requires:
-#
-# Sample Usage:
-#
-# [Remember: No empty lines between comments and class definition]
-class stdlib {
-
-  class { 'stdlib::stages': }
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/manifests/stages.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/manifests/stages.pp b/ambari-agent/src/main/puppet/modules/stdlib/manifests/stages.pp
deleted file mode 100644
index 97b9e90..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/manifests/stages.pp
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-# Class: stdlib::stages
-#
-# This class manages a standard set of Run Stages for Puppet.
-#
-# The high level stages are (In order):
-#
-#  * setup
-#  * main
-#  * runtime
-#  * setup_infra
-#  * deploy_infra
-#  * setup_app
-#  * deploy_app
-#  * deploy
-#
-# Parameters:
-#
-# Actions:
-#
-#   Declares various run-stages for deploying infrastructure,
-#   language runtimes, and application layers.
-#
-# Requires:
-#
-# Sample Usage:
-#
-#  node default {
-#    include stdlib::stages
-#    class { java: stage => 'runtime' }
-#  }
-#
-class stdlib::stages {
-
-  stage { 'setup':  before => Stage['main'] }
-  stage { 'runtime': require => Stage['main'] }
-  -> stage { 'setup_infra': }
-  -> stage { 'deploy_infra': }
-  -> stage { 'setup_app': }
-  -> stage { 'deploy_app': }
-  -> stage { 'deploy': }
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/spec/spec.opts
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/spec/spec.opts b/ambari-agent/src/main/puppet/modules/stdlib/spec/spec.opts
deleted file mode 100644
index 91cd642..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/spec/spec.opts
+++ /dev/null
@@ -1,6 +0,0 @@
---format
-s
---colour
---loadby
-mtime
---backtrace

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/spec/spec_helper.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/spec/spec_helper.rb b/ambari-agent/src/main/puppet/modules/stdlib/spec/spec_helper.rb
deleted file mode 100644
index a4aeeae..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/spec/spec_helper.rb
+++ /dev/null
@@ -1,18 +0,0 @@
-require 'pathname'
-dir = Pathname.new(__FILE__).parent
-$LOAD_PATH.unshift(dir, dir + 'lib', dir + '../lib')
-
-require 'mocha'
-require 'puppet'
-gem 'rspec', '=1.2.9'
-require 'spec/autorun'
-
-Spec::Runner.configure do |config|
-    config.mock_with :mocha
-end
-
-# We need this because the RAL uses 'should' as a method.  This
-# allows us the same behaviour but with a different method name.
-class Object
-    alias :must :should
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/getvar_spec.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/getvar_spec.rb b/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/getvar_spec.rb
deleted file mode 100644
index 16edd98..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/getvar_spec.rb
+++ /dev/null
@@ -1,53 +0,0 @@
-require 'puppet'
-
-# We don't need this for the basic tests we're doing
-# require 'spec_helper'
-
-# Dan mentioned that Nick recommended the function method call
-# to return the string value for the test description.
-# this will not even try the test if the function cannot be
-# loaded.
-describe Puppet::Parser::Functions.function(:getvar) do
-
-  # Pulled from Dan's create_resources function
-  def get_scope
-    @topscope = Puppet::Parser::Scope.new
-    # This is necessary so we don't try to use the compiler to discover our parent.
-    @topscope.parent = nil
-    @scope = Puppet::Parser::Scope.new
-    @scope.compiler = Puppet::Parser::Compiler.new(Puppet::Node.new("floppy", :environment => 'production'))
-    @scope.parent = @topscope
-    @compiler = @scope.compiler
-  end
-
-  describe 'when calling getvar from puppet' do
-
-    it "should not compile when no arguments are passed" do
-      Puppet[:code] = 'getvar()'
-      get_scope
-      expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-    it "should not compile when too many arguments are passed" do
-      Puppet[:code] = 'getvar("foo::bar", "baz")'
-      get_scope
-      expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-
-    it "should lookup variables in other namespaces" do
-      pending "Puppet doesn't appear to think getvar is an rvalue function... BUG?"
-      Puppet[:code] = <<-'ENDofPUPPETcode'
-        class site::data { $foo = 'baz' }
-        include site::data
-        $foo = getvar("site::data::foo")
-        if $foo != 'baz' {
-          fail('getvar did not return what we expect')
-        }
-      ENDofPUPPETcode
-      get_scope
-      @scope.compiler.compile
-    end
-
-  end
-
-end
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/has_key_spec.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/has_key_spec.rb b/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/has_key_spec.rb
deleted file mode 100644
index d1dcd15..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/has_key_spec.rb
+++ /dev/null
@@ -1,46 +0,0 @@
-require 'puppet'
-require 'mocha'
-describe Puppet::Parser::Functions.function(:has_key) do
-
-  # Pulled from Dan's create_resources function
-  # TODO - this should be moved to spec_helper since the
-  # logic is likely to be applied to multiple rspec files.
-  let(:compiler) {
-    topscope = Puppet::Parser::Scope.new
-    # This is necessary so we don't try to use the compiler to discover our parent.
-    topscope.parent = nil
-    my_scope = Puppet::Parser::Scope.new
-    my_scope.compiler = Puppet::Parser::Compiler.new(Puppet::Node.new("floppy", :environment => 'production'))
-    my_scope.parent = topscope
-    compiler = my_scope.compiler
-  }
-  let(:scope) {
-    scope = Puppet::Parser::Scope.new
-    scope.stubs(:environment).returns(Puppet::Node::Environment.new('production'))
-    scope
-  }
-
-  describe 'when calling has_key from puppet' do
-    it "should not compile when no arguments are passed" do
-      Puppet[:code] = 'has_key()'
-      expect { compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-    it "should not compile when 1 argument is passed" do
-      Puppet[:code] = "has_key('foo')"
-      expect { compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-    it "should require the first value to be a Hash" do
-      Puppet[:code] = "has_key('foo', 'bar')"
-      expect { compiler.compile }.should raise_error(Puppet::ParseError, /expects the first argument to be a hash/)
-    end
-  end
-  describe 'when calling the function has_key from a scope instance' do
-    it 'should detect existing keys' do
-      scope.function_has_key([{'one' => 1}, 'one']).should be_true
-    end
-    it 'should detect absent keys' do
-      scope.function_has_key([{'one' => 1}, 'two']).should be_false
-    end
-  end
-
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/merge_spec.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/merge_spec.rb b/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/merge_spec.rb
deleted file mode 100644
index 71e1869..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/merge_spec.rb
+++ /dev/null
@@ -1,54 +0,0 @@
-require 'puppet'
-require 'mocha'
-describe Puppet::Parser::Functions.function(:merge) do
-
-  # Pulled from Dan's create_resources function
-  # TODO - these let statements should be moved somewhere
-  # where they can be reused
-  let(:compiler) {
-    topscope = Puppet::Parser::Scope.new
-    # This is necessary so we don't try to use the compiler to discover our parent.
-    topscope.parent = nil
-    my_scope = Puppet::Parser::Scope.new
-    my_scope.compiler = Puppet::Parser::Compiler.new(Puppet::Node.new("floppy", :environment => 'production'))
-    my_scope.parent = topscope
-    compiler = my_scope.compiler
-  }
-  let(:scope) {
-    scope = Puppet::Parser::Scope.new
-    scope.stubs(:environment).returns(Puppet::Node::Environment.new('production'))
-    scope
-  }
-
-  describe 'when calling merge from puppet' do
-    it "should not compile when no arguments are passed" do
-      Puppet[:code] = 'merge()'
-      expect { compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-    it "should not compile when 1 argument is passed" do
-      Puppet[:code] = "$my_hash={'one' => 1}\nmerge($my_hash)"
-      expect { compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-  end
-  describe 'when calling merge on the scope instance' do
-    it 'should require all parameters are hashes' do
-      expect { new_hash = scope.function_merge([{}, '2'])}.should raise_error(Puppet::ParseError, /unexpected argument type String/)
-
-    end
-    it 'should be able to merge two hashes' do
-      new_hash = scope.function_merge([{'one' => '1', 'two' => '1'}, {'two' => '2', 'three' => '2'}])
-      new_hash['one'].should   == '1'
-      new_hash['two'].should   == '2'
-      new_hash['three'].should == '2'
-    end
-    it 'should merge multiple hashes' do
-      hash = scope.function_merge([{'one' => 1}, {'one' => '2'}, {'one' => '3'}])
-      hash['one'].should == '3'
-    end
-    it 'should accept empty hashes' do
-      scope.function_merge([{},{},{}]).should == {}
-    end
-
-  end
-
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_bool_spec.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_bool_spec.rb b/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_bool_spec.rb
deleted file mode 100644
index e95c396..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_bool_spec.rb
+++ /dev/null
@@ -1,76 +0,0 @@
-require 'puppet'
-
-# We don't need this for the basic tests we're doing
-# require 'spec_helper'
-
-# Dan mentioned that Nick recommended the function method call
-# to return the string value for the test description.
-# this will not even try the test if the function cannot be
-# loaded.
-describe Puppet::Parser::Functions.function(:validate_bool) do
-
-  # Pulled from Dan's create_resources function
-  def get_scope
-    @topscope = Puppet::Parser::Scope.new
-    # This is necessary so we don't try to use the compiler to discover our parent.
-    @topscope.parent = nil
-    @scope = Puppet::Parser::Scope.new
-    @scope.compiler = Puppet::Parser::Compiler.new(Puppet::Node.new("floppy", :environment => 'production'))
-    @scope.parent = @topscope
-    @compiler = @scope.compiler
-  end
-
-  describe 'when calling validate_bool from puppet' do
-
-    %w{ true false }.each do |the_string|
-
-      it "should not compile when #{the_string} is a string" do
-        Puppet[:code] = "validate_bool('#{the_string}')"
-        get_scope
-        expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /is not a boolean/)
-      end
-
-      it "should compile when #{the_string} is a bare word" do
-        Puppet[:code] = "validate_bool(#{the_string})"
-        get_scope
-        @scope.compiler.compile
-      end
-
-    end
-
-    it "should not compile when an arbitrary string is passed" do
-      Puppet[:code] = 'validate_bool("jeff and dan are awesome")'
-      get_scope
-      expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /is not a boolean/)
-    end
-
-    it "should not compile when no arguments are passed" do
-      Puppet[:code] = 'validate_bool()'
-      get_scope
-      expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-
-    it "should compile when multiple boolean arguments are passed" do
-      Puppet[:code] = <<-'ENDofPUPPETcode'
-        $foo = true
-        $bar = false
-        validate_bool($foo, $bar, true, false)
-      ENDofPUPPETcode
-      get_scope
-      @scope.compiler.compile
-    end
-
-    it "should compile when multiple boolean arguments are passed" do
-      Puppet[:code] = <<-'ENDofPUPPETcode'
-        $foo = true
-        $bar = false
-        validate_bool($foo, $bar, true, false, 'jeff')
-      ENDofPUPPETcode
-      get_scope
-      expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /is not a boolean/)
-    end
-
-  end
-
-end
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_hash_spec.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_hash_spec.rb b/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_hash_spec.rb
deleted file mode 100644
index 8cc0b3d..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_hash_spec.rb
+++ /dev/null
@@ -1,63 +0,0 @@
-require 'puppet'
-
-# We don't need this for the basic tests we're doing
-# require 'spec_helper'
-
-# Dan mentioned that Nick recommended the function method call
-# to return the string value for the test description.
-# this will not even try the test if the function cannot be
-# loaded.
-describe Puppet::Parser::Functions.function(:validate_hash) do
-
-  # Pulled from Dan's create_resources function
-  def get_scope
-    @topscope = Puppet::Parser::Scope.new
-    # This is necessary so we don't try to use the compiler to discover our parent.
-    @topscope.parent = nil
-    @scope = Puppet::Parser::Scope.new
-    @scope.compiler = Puppet::Parser::Compiler.new(Puppet::Node.new("floppy", :environment => 'production'))
-    @scope.parent = @topscope
-    @compiler = @scope.compiler
-  end
-
-  describe 'when calling validate_hash from puppet' do
-
-    %w{ true false }.each do |the_string|
-
-      it "should not compile when #{the_string} is a string" do
-        Puppet[:code] = "validate_hash('#{the_string}')"
-        get_scope
-        expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /is not a Hash/)
-      end
-
-      it "should not compile when #{the_string} is a bare word" do
-        Puppet[:code] = "validate_hash(#{the_string})"
-        get_scope
-        expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /is not a Hash/)
-      end
-
-    end
-
-    it "should compile when multiple hash arguments are passed" do
-      Puppet[:code] = <<-'ENDofPUPPETcode'
-        $foo = {}
-        $bar = { 'one' => 'two' }
-        validate_hash($foo, $bar)
-      ENDofPUPPETcode
-      get_scope
-      @scope.compiler.compile
-    end
-
-    it "should not compile when an undef variable is passed" do
-      Puppet[:code] = <<-'ENDofPUPPETcode'
-        $foo = undef
-        validate_hash($foo)
-      ENDofPUPPETcode
-      get_scope
-      expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /is not a Hash/)
-    end
-
-  end
-
-end
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/provider/append_line/ruby_spec.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/provider/append_line/ruby_spec.rb b/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/provider/append_line/ruby_spec.rb
deleted file mode 100644
index ea28c31..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/provider/append_line/ruby_spec.rb
+++ /dev/null
@@ -1,30 +0,0 @@
-require 'puppet'
-require 'tempfile'
-provider_class = Puppet::Type.type(:append_line).provider(:ruby)
-describe provider_class do
-  before :each do
-    tmp = Tempfile.new('tmp')
-    @tmpfile = tmp.path
-    tmp.close!
-    @resource = Puppet::Type::Append_line.new(
-      {:name => 'foo', :path => @tmpfile, :line => 'foo'}
-    )
-    @provider = provider_class.new(@resource)
-  end
-  it 'should detect if the line exists in the file' do
-    File.open(@tmpfile, 'w') do |fh|
-      fh.write('foo')
-    end
-    @provider.exists?.should be_true
-  end
-  it 'should detect if the line does not exist in the file' do
-    File.open(@tmpfile, 'w') do |fh|
-      fh.write('foo1')
-    end
-    @provider.exists?.should be_nil
-  end
-  it 'should append to an existing file when creating' do
-    @provider.create
-    File.read(@tmpfile).chomp.should == 'foo'
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/anchor_spec.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/anchor_spec.rb b/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/anchor_spec.rb
deleted file mode 100644
index 2030b83..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/anchor_spec.rb
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env ruby
-
-require 'puppet'
-
-anchor = Puppet::Type.type(:anchor).new(:name => "ntp::begin")
-
-describe anchor do
-  it "should stringify normally" do
-    anchor.to_s.should == "Anchor[ntp::begin]"
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/append_line_spec.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/append_line_spec.rb b/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/append_line_spec.rb
deleted file mode 100644
index d0564c3..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/append_line_spec.rb
+++ /dev/null
@@ -1,24 +0,0 @@
-require 'puppet'
-require 'tempfile'
-describe Puppet::Type.type(:append_line) do
-  before :each do
-    @append_line = Puppet::Type.type(:append_line).new(:name => 'foo', :line => 'line', :path => '/tmp/path')
-  end
-  it 'should accept a line and path' do
-    @append_line[:line] = 'my_line'
-    @append_line[:line].should == 'my_line'
-  end
-  it 'should accept posix filenames' do
-    @append_line[:path] = '/tmp/path'
-    @append_line[:path].should == '/tmp/path'
-  end
-  it 'should not accept unqualified path' do
-    expect { @append_line[:path] = 'file' }.should raise_error(Puppet::Error, /File paths must be fully qualified/)
-  end
-  it 'should require that a line is specified' do
-    expect { Puppet::Type.type(:append_line).new(:name => 'foo', :path => '/tmp/file') }.should raise_error(Puppet::Error, /Both line and path are required attributes/)
-  end
-  it 'should require that a file is specified' do
-    expect { Puppet::Type.type(:append_line).new(:name => 'foo', :line => 'path') }.should raise_error(Puppet::Error, /Both line and path are required attributes/)
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/tests/append_line.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/tests/append_line.pp b/ambari-agent/src/main/puppet/modules/stdlib/tests/append_line.pp
deleted file mode 100644
index f50a833..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/tests/append_line.pp
+++ /dev/null
@@ -1,7 +0,0 @@
-file { '/tmp/dansfile':
-  ensure => present
-}->
-append_line { 'dans_line':
-  line => 'dan is awesome',
-  path => '/tmp/dansfile',
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/stdlib/tests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/stdlib/tests/init.pp b/ambari-agent/src/main/puppet/modules/stdlib/tests/init.pp
deleted file mode 100644
index 9675d83..0000000
--- a/ambari-agent/src/main/puppet/modules/stdlib/tests/init.pp
+++ /dev/null
@@ -1 +0,0 @@
-include stdlib


http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper_jaas.conf.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper_jaas.conf.erb b/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper_jaas.conf.erb
deleted file mode 100644
index 5a53a13..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper_jaas.conf.erb
+++ /dev/null
@@ -1,8 +0,0 @@
-Server {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="<%=scope.function_hdp_template_var("::hdp-zookeeper::params::zk_keytab_path")%>"
-principal="<%=scope.function_hdp_template_var("::hdp-zookeeper::params::zk_principal")%>";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/files/changeToSecureUid.sh b/ambari-agent/src/main/puppet/modules/hdp/files/changeToSecureUid.sh
deleted file mode 100644
index 4872a10..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   grep -q ":$i:" /etc/passwd
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-find_available_uid
-
-if [ -z "$newUid" ]
-then
-  echo "Failed to find Uid between 1001 and 2000"
-  exit 1
-fi
-
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
-exit 0
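
The grep-based scan above is approximate; a Ruby sketch (not part of this
patch) of the same search against the passwd database directly:

    require 'etc'

    def find_available_uid(range = 1001..2000)
      taken = {}
      Etc.passwd { |entry| taken[entry.uid] = true }  # iterate all passwd entries
      range.find { |uid| !taken[uid] }                # nil if the range is exhausted
    end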

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_args_as_array.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_args_as_array.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_args_as_array.rb
deleted file mode 100644
index 46becea..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_args_as_array.rb
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# to handle differences in how args are passed in
-module Puppet::Parser::Functions
-  newfunction(:hdp_args_as_array, :type => :rvalue) do |args|
-    args.kind_of?(Array) ? args : [args]
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_array_from_comma_list.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_array_from_comma_list.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_array_from_comma_list.rb
deleted file mode 100644
index 56882f3..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_array_from_comma_list.rb
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_array_from_comma_list, :type => :rvalue) do |args|
-    args = [args].flatten
-    function_hdp_is_empty(args[0]) ? "" : args[0].split(",") 
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_calc_xmn_from_xms.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_calc_xmn_from_xms.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_calc_xmn_from_xms.rb
deleted file mode 100644
index e83a742..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_calc_xmn_from_xms.rb
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_calc_xmn_from_xms, :type => :rvalue) do |args|
-    heapsize_orig_str = args[0].to_s
-    xmn_percent = args[1].to_f
-    xmn_max = args[2].to_i
-    heapsize_str = heapsize_orig_str.gsub(/\D/,"")
-    heapsize = heapsize_str.to_i
-    heapsize_unit = heapsize_orig_str.gsub(/\d/,"")
-    xmn_val = heapsize*xmn_percent
-    xmn_val = xmn_val.floor.to_i
-    xmn_val = xmn_val/8
-    xmn_val = xmn_val*8
-    xmn_val = xmn_val > xmn_max ? xmn_max : xmn_val
-    xmn_val_str = "" + xmn_val.to_s + heapsize_unit
-    xmn_val_str
-  end
-end
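
The arithmetic above is easier to check with a worked example: the Xmn value is the given percentage of the heap size, rounded down to a multiple of 8 and capped at a maximum, with the unit suffix preserved. A standalone plain-Ruby sketch (method name illustrative):

def calc_xmn_from_xms(heapsize, xmn_percent, xmn_max)
  unit = heapsize.gsub(/\d/, "")                        # "m" from "1024m"
  xmn  = (heapsize.gsub(/\D/, "").to_i * xmn_percent).floor
  xmn  = (xmn / 8) * 8                                  # integer division rounds down to a multiple of 8
  "#{[xmn, xmn_max].min}#{unit}"
end

puts calc_xmn_from_xms("1024m", 0.2, 512)               # => "200m" (1024 * 0.2 = 204.8 -> 204 -> 200)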

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_comma_list_from_array.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_comma_list_from_array.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_comma_list_from_array.rb
deleted file mode 100644
index 41007a4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_comma_list_from_array.rb
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_comma_list_from_array, :type => :rvalue) do |args|
-    args[0].join(",") 
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_default.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_default.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_default.rb
deleted file mode 100644
index 7d48cab..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_default.rb
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_default, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    scoped_var_name = args[0]
-    var_parts = scoped_var_name.split("/")
-    var_name = var_parts.last    
-    default = args[1]    
-    val = lookupvar("::#{var_name}") 
-    if function_hdp_is_empty(val) == false and val.class == String
-      val = val.strip
-    end      
-    # Lookup value inside a hash map.
-    if var_parts.length > 1 and function_hdp_is_empty(val) and function_hdp_is_empty(lookupvar("::configuration")) == false and function_hdp_is_empty(lookupvar("#{var_parts[-2]}")) == false
-      keyHash = var_parts[-2]
-      hashMap = lookupvar("#{keyHash}") 
-      val = hashMap.fetch(var_name, default.to_s)
-    end
-    # To workaround string-boolean comparison issues,
-    # ensure that we return boolean result if the default value
-    # is also boolean
-    if default == true or default == false # we expect boolean value as a result
-      casted_val = (val == "true" or val == true) # converting to boolean
-    else # default
-      casted_val = val
-    end
-    function_hdp_is_empty(val) ? (default||"") : casted_val
-  end
-end
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_escape_spec_characters.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_escape_spec_characters.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_escape_spec_characters.rb
deleted file mode 100644
index a3d7ef8..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_escape_spec_characters.rb
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# escape special characters in a value (e.g. a password) with a backslash
-module Puppet::Parser::Functions
-  newfunction(:hdp_escape_spec_characters, :type => :rvalue) do |args|
-    pw_value = args[0]
-    pattern = /(\!|\'|\$|\)|\(|\*|\"|\.|\\)/
-    pw_value.gsub(pattern){|match|"\\"  + match}
-  end
-end
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_fail.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_fail.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_fail.rb
deleted file mode 100644
index 9241b1d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_fail.rb
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_fail) do |args|
-    args = [args].flatten
-    msg = args[0]
-    function_fail(msg)
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_first_value_from_list.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_first_value_from_list.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_first_value_from_list.rb
deleted file mode 100644
index a361419..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_first_value_from_list.rb
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_first_value_from_list, :type => :rvalue) do |args|
-    args.kind_of?(Array) && args.any? ? args[0].to_s : args
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_dir_from_url.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_dir_from_url.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_dir_from_url.rb
deleted file mode 100644
index 1bd8ee4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_dir_from_url.rb
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# to get the directory portion from a URL string
-module Puppet::Parser::Functions
-  newfunction(:hdp_get_dir_from_url, :type => :rvalue) do |args|
-
-    if args.length > 1
-      default = args[1]
-    else
-      default = ""
-    end
-
-    if args.empty?
-      var = default
-    else
-      if args.kind_of?(Array)
-        splitArgsResult = args[0].split(":")
-      else
-        splitArgsResult = args.split(":")
-      end
-      if splitArgsResult.length < 2
-        var = default
-      else
-        strWithDir = splitArgsResult[splitArgsResult.length - 1]
-        startIndexOfDir = strWithDir.index('/')
-        startIndexOfUri = strWithDir.index('///')
-        if startIndexOfDir == nil
-          var = default
-        else
-          if startIndexOfUri == nil
-            var = strWithDir[startIndexOfDir, strWithDir.size - 1]
-          else
-            var = strWithDir[startIndexOfDir + 2, strWithDir.size - 1]
-          end
-        end
-      end
-    end
-  end
-end
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_directory_from_filepath.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_directory_from_filepath.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_directory_from_filepath.rb
deleted file mode 100644
index aff6be3..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_directory_from_filepath.rb
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# given a file's absolute path, return its parent directory path
-
-module Puppet::Parser::Functions
-  newfunction(:hdp_get_directory_from_filepath, :type => :rvalue) do |args|
-    dir_path = ""
-    if args.length > 0
-      dir_path = File.split(args[0])[0]
-    end
-    dir_path
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_kinit_path.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_kinit_path.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_kinit_path.rb
deleted file mode 100644
index ee8e6b1..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_kinit_path.rb
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# given a set of paths, find the first full path to kinit
-module Puppet::Parser::Functions
-  newfunction(:hdp_get_kinit_path, :type => :rvalue) do |args|
-    kinit_path = ""
-    if args.length > 0
-      args.join(",").split(',').reject{|s| s.strip.length < 1}.each do |s|
-        path = File.join(s.strip, "kinit")
-        if File.exist?(path) and File.file?(path)
-          kinit_path = path
-          break
-        end
-      end
-    end
-    kinit_path
-  end
-end
\ No newline at end of file
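
The helper above probes a comma-separated list of directories for the first existing kinit binary. A compact plain-Ruby sketch of the same search (name illustrative):

def get_kinit_path(paths)
  candidates = paths.split(",").map(&:strip).reject(&:empty?)
  candidates.map { |dir| File.join(dir, "kinit") }.find { |p| File.file?(p) } || ""
end

puts get_kinit_path("/usr/bin, /usr/kerberos/bin")  # e.g. "/usr/bin/kinit"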

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_major_stack_version.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_major_stack_version.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_major_stack_version.rb
deleted file mode 100644
index 859af6f..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_major_stack_version.rb
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# extract the major version from a stack version string such as "2.0"
-module Puppet::Parser::Functions
-  newfunction(:hdp_get_major_stack_version, :type => :rvalue) do |args|
-    stack_version = args[0]
-    major_stack_version = stack_version.split('.')[0]
-    major_stack_version.to_i
-  end
-end
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_port_from_url.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_port_from_url.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_port_from_url.rb
deleted file mode 100644
index 8fff51e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_port_from_url.rb
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# to get the port from a URL string
-module Puppet::Parser::Functions
-  newfunction(:hdp_get_port_from_url, :type => :rvalue) do |args|
-    def is_numeric?(s)
-       !!Integer(s) rescue false
-    end
-
-    var = args.empty? ? "" : args.kind_of?(Array) ? args[0].split(":")[1] : args.split(":")[1]
-    
-    if function_hdp_is_empty(var)
-       if args.kind_of?(Array)
-          if args.length > 1
-             var = args[1]        
-          else 
-             is_numeric?(args[0]) ? args[0] : ""
-          end
-       else 
-          is_numeric?(args) ? args : "";
-       end 
-    else 
-       var
-    end
-  end
-end
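
The function above extracts the port from a plain host:port address and falls back to a default (or to the value itself when it is already numeric). A hedged sketch of the common host:port case only, in plain Ruby (name illustrative; scheme-prefixed URLs are not handled):

def get_port_from_url(address, default = "")
  port = address.split(":")[1]
  (port.nil? || port.empty?) ? default : port
end

puts get_port_from_url("namenode.example.com:50070")     # => "50070"
puts get_port_from_url("namenode.example.com", "50070")  # => "50070" (default applies)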

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_value_from_map.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_value_from_map.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_value_from_map.rb
deleted file mode 100644
index f1a9b2a..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_get_value_from_map.rb
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_get_value_from_map, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    hashMap = args[0]
-    key = args[1]
-    default = args[2]
-    val = hashMap.fetch(key, default.to_s)
-    if default == true or default == false
-      casted_val = (val == "true" or val == true) # converting to boolean
-    else # default
-      casted_val = val
-    end
-    function_hdp_is_empty(val) ? (default||"") : casted_val
-  end
-end
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host.rb
deleted file mode 100644
index 00ecb2b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host.rb
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_host, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    var = args[0]
-    val = lookupvar("::"+var)
-    function_hdp_is_empty(val) ? "" : val 
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host_attribute.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host_attribute.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host_attribute.rb
deleted file mode 100644
index be6ddda..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host_attribute.rb
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-module Puppet::Parser::Functions
-
-=begin
-  This function returns value of an attribute for a given host
-  or an array of attributes for a given array of hosts (one-to-one mapping).
-  The attribute type is specified by a string identifier (like "publicfqdn").
-=end
-
-  newfunction(:hdp_host_attribute, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    hash,attr,source = args
-    ret_val = lambda do |hash,attr,s|
-      ret = ""
-      ndx = hash[s]
-      unless function_hdp_is_empty(ndx)
-        val = ndx[attr]
-        ret = function_hdp_is_empty(val) ? "" : val
-      end
-      ret
-    end
-    if source.kind_of?(Array)
-      source.map{|s|ret_val.call(hash,attr,s)}
-    else
-     ret_val.call(hash,attr,source)
-    end
-  end
-end
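
The lookup above resolves one attribute (such as "publicfqdn") for a single host, or one-to-one for an array of hosts. An illustrative plain-Ruby equivalent; the hostnames and data below are made up:

def host_attribute(hash, attr, source)
  lookup = lambda do |host|
    entry = hash[host]
    entry.nil? ? "" : (entry[attr] || "")
  end
  source.kind_of?(Array) ? source.map { |h| lookup.call(h) } : lookup.call(source)
end

hosts = { "c6401.internal" => { "publicfqdn" => "c6401.example.com" } }
puts host_attribute(hosts, "publicfqdn", "c6401.internal")    # => c6401.example.com
p    host_attribute(hosts, "publicfqdn", ["c6401.internal"])  # => ["c6401.example.com"]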

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_is_empty.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_is_empty.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_is_empty.rb
deleted file mode 100644
index f57f8eb..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_is_empty.rb
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_is_empty, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    el = args[0]
-    el.nil? or (el.respond_to?(:to_s) and ["undefined","undef",""].include?(el.to_s))
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_no_hosts.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_no_hosts.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_no_hosts.rb
deleted file mode 100644
index 6c1a988..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_no_hosts.rb
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_no_hosts, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    var = args[0]
-    function_hdp_is_empty(function_hdp_host(var))
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_option_value.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_option_value.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_option_value.rb
deleted file mode 100644
index 1348879..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_option_value.rb
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_option_value, :type => :rvalue) do |args|
-    args = [args].flatten
-    opts = args[0]
-    key = args[1]
-    if opts.kind_of?(Hash) and not function_hdp_is_empty(key)
-      opts[key]||:undef
-    else
-      :undef
-    end
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_set_from_comma_list.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_set_from_comma_list.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_set_from_comma_list.rb
deleted file mode 100644
index 8790fde..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_set_from_comma_list.rb
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# build a stripped, deduplicated array from a comma-separated list
-require 'set'
-module Puppet::Parser::Functions
-  newfunction(:hdp_set_from_comma_list, :type => :rvalue) do |args|
-    dir_list = args[0]
-    reject_items = args[1].nil? ? [] : function_hdp_array_from_comma_list(args[1])
-
-    list = function_hdp_array_from_comma_list(dir_list)
-    list = list.map { |e| e.strip }
-    # drop empty strings and rejected items
-    list.reject! { |e| e.empty? or reject_items.include?(e) }
-    list.uniq   
-  end
-end
\ No newline at end of file
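
The function above normalizes a comma-separated list into a stripped, deduplicated array, dropping empty entries and anything in an optional reject list. A minimal plain-Ruby sketch (names illustrative):

def set_from_comma_list(list, reject_list = "")
  drop = reject_list.split(",").map(&:strip)
  list.split(",").map(&:strip).reject { |e| e.empty? || drop.include?(e) }.uniq
end

p set_from_comma_list("/grid/0, /grid/1,,/grid/0", "/grid/1")  # => ["/grid/0"]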

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_str_ends_with.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_str_ends_with.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_str_ends_with.rb
deleted file mode 100644
index d20f515..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_str_ends_with.rb
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_str_ends_with, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    if args.size >= 2
-      str = args[0]
-      substr = args[1]
-      result = str.end_with?(substr)
-    else 
-      result = false
-    end
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_template_var.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_template_var.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_template_var.rb
deleted file mode 100644
index 78a84a5..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_template_var.rb
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_template_var, :type => :rvalue) do |args|
-    args = [args].flatten
-    qualified_var = args[0]
-    unless qualified_var =~ /^::/
-      #module_name = lookupvar("module_name")||"UNKNOWN"
-      #qualified_var = "::#{module_name}::params::#{args[0]}"
-      component = lookupvar("component")||"UNKNOWN"
-      module_name = (component == "base" ? "::hdp" : "::hdp-#{component}")      
-      qualified_var = "#{module_name}::params::#{args[0]}"
-    end
-    val = lookupvar(qualified_var)
-    if function_hdp_is_empty(val) == false and val.class == String
-      val = val.strip
-    end  
-    (val.nil? or val == :undefined) ? "" : val 
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_to_lowercase.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_to_lowercase.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_to_lowercase.rb
deleted file mode 100644
index 25e2eaa..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_to_lowercase.rb
+++ /dev/null
@@ -1,48 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-module Puppet::Parser::Functions
-  newfunction(:hdp_to_lowercase, :type => :rvalue, :doc => <<-EOS
-Converts the case of a string or all strings in an array to lower case.
-    EOS
-  ) do |arguments|
-
-    raise(Puppet::ParseError, "downcase(): Wrong number of arguments " +
-      "given (#{arguments.size} for 1)") if arguments.size < 1
-
-    value = arguments[0]
-    klass = value.class
-
-    unless [Array, String].include?(klass)
-      raise(Puppet::ParseError, 'hdp_to_lowercase(): Requires either ' +
-        'an array or a string to work with')
-    end
-
-    if value.is_a?(Array)
-      # Numbers in Puppet are often string-encoded which is troublesome ...
-      result = value.collect { |i| i.is_a?(String) ? i.downcase : i }
-    else
-      result = value.downcase
-    end
-
-    return result
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_unique_id_and_date.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_unique_id_and_date.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_unique_id_and_date.rb
deleted file mode 100644
index 01179e1..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_unique_id_and_date.rb
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_unique_id_and_date, :type => :rvalue) do 
-    id = lookupvar('::uniqueid')
-    date = `date +"%M%d%y"`.chomp
-    "id#{id}_date#{date}"
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_user.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_user.rb b/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_user.rb
deleted file mode 100644
index a858fbb..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_user.rb
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_user, :type => :rvalue) do |args|
-    args = [args].flatten
-    user = args[0]
-    val = lookupvar("::hdp::params::#{user}")
-    (val.nil? or val == :undefined) ? "" : val 
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/manifests/.directory
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/.directory b/ambari-agent/src/main/puppet/modules/hdp/manifests/.directory
deleted file mode 100644
index 6f816d7..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/.directory
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-[Dolphin]
-Timestamp=2011,3,16,9,26,14
-ViewMode=1

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp
deleted file mode 100644
index b7db742..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp::configfile(
-  $component,
-  $conf_dir = undef, #if this is undef then name is of form conf_dir/file_name
-  $owner = undef, 
-  $group = $hdp::params::user_group,
-  $mode = undef,
-  $size = 64, #32 or 64 bit (used to pick appropriate java_home)
-  $template_tag = undef,
-  $namenode_host = $hdp::params::namenode_host,
-  $jtnode_host = $hdp::params::jtnode_host,
-  $snamenode_host = $hdp::params::snamenode_host,
-  $rm_host = $hdp::params::rm_host,
-  $nm_hosts = $hdp::params::nm_hosts,
-  $hs_host = $hdp::params::hs_host,
-  $slave_hosts = $hdp::params::slave_hosts,
-  $journalnode_hosts = $hdp::params::journalnode_hosts,
-  $zkfc_hosts = $hdp::params::zkfc_hosts,
-  $mapred_tt_hosts = $hdp::params::mapred_tt_hosts,
-  $all_hosts = $hdp::params::all_hosts,
-  $hbase_rs_hosts = $hdp::params::hbase_rs_hosts,
-  $zookeeper_hosts = $hdp::params::zookeeper_hosts,
-  $flume_hosts = $hdp::params::flume_hosts,
-  $hbase_master_hosts = $hdp::params::hbase_master_hosts,
-  $hcat_server_host = $hdp::params::hcat_server_host,
-  $hive_server_host = $hdp::params::hive_server_host,
-  $oozie_server = $hdp::params::oozie_server,
-  $webhcat_server_host = $hdp::params::webhcat_server_host,
-  $hcat_mysql_host = $hdp::params::hcat_mysql_host,
-  $nagios_server_host = $hdp::params::nagios_server_host,
-  $ganglia_server_host = $hdp::params::ganglia_server_host,
-  $dashboard_host = $hdp::params::dashboard_host,
-  $gateway_host = $hdp::params::gateway_host,
-  $public_namenode_host = $hdp::params::public_namenode_host,
-  $public_snamenode_host = $hdp::params::public_snamenode_host,
-  $public_rm_host = $hdp::params::public_rm_host,
-  $public_nm_hosts = $hdp::params::public_nm_hosts,
-  $public_hs_host = $hdp::params::public_hs_host,
-  $public_journalnode_hosts = $hdp::params::public_journalnode_hosts,
-  $public_zkfc_hosts = $hdp::params::public_zkfc_hosts,
-  $public_jtnode_host = $hdp::params::public_jtnode_host,
-  $public_hbase_master_hosts = $hdp::params::public_hbase_master_hosts,
-  $public_zookeeper_hosts = $hdp::params::public_zookeeper_hosts,
-  $public_ganglia_server_host = $hdp::params::public_ganglia_server_host,
-  $public_nagios_server_host = $hdp::params::public_nagios_server_host,
-  $public_dashboard_host = $hdp::params::public_dashboard_host,
-  $public_hive_server_host = $hdp::params::public_hive_server_host,
-  $public_oozie_server = $hdp::params::public_oozie_server,
-  $public_webhcat_server_host = $hdp::params::public_webhcat_server_host
-) 
-{
-
-   if ($conf_dir == undef) {
-     $qualified_file_name = $name
-     $file_name = regsubst($name,'^.+/([^/]+$)','\1')
-   } else {
-     $qualified_file_name = "${conf_dir}/${name}"
-     $file_name = $name
-   }
-   if ($component == 'base') {
-     $module = 'hdp'
-   } else {
-      $module = "hdp-${component}"   
-   }
-
-   if ($template_tag == undef) {  
-     $template_name = "${module}/${file_name}.erb"
-   } else {
-     $template_name = "${module}/${file_name}-${template_tag}.erb"
-   }
-
-   file{ $qualified_file_name:
-     ensure  => present,
-     owner   => $owner,
-     group   => $group,
-     mode    => $mode,
-     content => template($template_name)
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/manifests/download_keytabs.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/download_keytabs.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/download_keytabs.pp
deleted file mode 100644
index e404c9e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/download_keytabs.pp
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp::download_keytab(
-  $masterhost,
-  $keytabdst,
-  $keytabfile,
-  $owner,
-  $group = undef,
-  $mode = '0400',
-  $hostnameInPrincipals = 'yes'
-)
-{
-  $hostname = $hdp::params::hostname
-  if ($hostnameInPrincipals == 'yes') {
-    $keytabsrc = "puppet://${masterhost}/modules/keytabs/${hostname}.${keytabfile}"
-  } else {
-    $keytabsrc = "puppet://${masterhost}/modules/keytabs/${keytabfile}"
-  }
-  file { $keytabdst :
-    ensure => present,
-    source => $keytabsrc,
-    mode => $mode,
-    owner => $owner,
-    group => $group
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
deleted file mode 100644
index d907130..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
+++ /dev/null
@@ -1,528 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp(
-  $service_state = undef,
-  $pre_installed_pkgs = undef
-)
-{
-
-  import 'params.pp'
-  include hdp::params
-
-  Exec { logoutput => 'on_failure' }
-  
-  hdp::group { 'hdp_user_group':
-    group_name => $hdp::params::user_group,
-  }
-
- ## Port settings
-  if has_key($configuration, 'hdfs-site') {
-    $hdfs-site = $configuration['hdfs-site']
-
-    if (hdp_get_major_stack_version($stack_version) >= 2) {
-      $namenode_port = hdp_get_port_from_url($hdfs-site["dfs.namenode.http-address"])
-      $snamenode_port = hdp_get_port_from_url($hdfs-site["dfs.namenode.secondary.http-address"])
-    } else {
-      $namenode_port = hdp_get_port_from_url($hdfs-site["dfs.http.address"])
-      $snamenode_port = hdp_get_port_from_url($hdfs-site["dfs.secondary.http.address"])
-    }
-
-    $datanode_port = hdp_get_port_from_url($hdfs-site["dfs.datanode.http.address"])
-    $journalnode_port = hdp_get_port_from_url($hdfs-site["dfs.journalnode.http-address"])
-  } else {
-    $namenode_port = "50070"
-    $snamenode_port = "50090"
-    $datanode_port = "50075"
-    $journalnode_port = "8480"
-  }
-
-  if has_key($configuration, 'mapred-site') {
-    $mapred-site = $configuration['mapred-site']
-
-    if (hdp_get_major_stack_version($stack_version) >= 2) {
-      $jtnode_port = hdp_get_port_from_url($mapred-site["mapreduce.jobtracker.http.address"],"50030")
-      $tasktracker_port = hdp_get_port_from_url($mapred-site["mapreduce.tasktracker.http.address"],"50060")
-    } else {
-      $jtnode_port = hdp_get_port_from_url($mapred-site["mapred.job.tracker.http.address"],"50030")
-      $tasktracker_port = hdp_get_port_from_url($mapred-site["mapred.task.tracker.http.address"],"50060")
-    }
-    $jobhistory_port = hdp_get_port_from_url($mapred-site["mapreduce.history.server.http.address"],"51111")
-
-    $hs_port = hdp_get_port_from_url($mapred-site["mapreduce.jobhistory.webapp.address"],"19888")
-  }
-
-  if has_key($configuration, 'yarn-site') {
-    $yarn-site = $configuration['yarn-site']
-    $rm_port = hdp_get_port_from_url($yarn-site["yarn.resourcemanager.webapp.address"],"8088")
-    $rm_https_port = hdp_get_port_from_url($yarn-site["yarn.resourcemanager.webapp.https.address"],"8090")
-    $nm_port = hdp_get_port_from_url($yarn-site["yarn.nodemanager.webapp.address"],"8042")
-  }
-
-  $hbase_master_port = hdp_default("hbase-site/hbase.master.info.port","60010")
-  $hbase_rs_port = hdp_default("hbase-site/hbase.regionserver.info.port","60030")
-  ## Nagios check_tcp port
-  $hbase_master_rpc_port = hdp_default("hbase-site/hbase.master.port", "60000")
-  
-  $ganglia_port = hdp_default("ganglia_port","8651")
-  $ganglia_collector_slaves_port = hdp_default("ganglia_collector_slaves_port","8660")
-  $ganglia_collector_namenode_port = hdp_default("ganglia_collector_namenode_port","8661")
-  $ganglia_collector_jobtracker_port = hdp_default("ganglia_collector_jobtracker_port","8662")
-  $ganglia_collector_hbase_port = hdp_default("ganglia_collector_hbase_port","8663")
-  $ganglia_collector_rm_port = hdp_default("ganglia_collector_rm_port","8664")
-  $ganglia_collector_nm_port = hdp_default("ganglia_collector_nm_port","8660")
-  $ganglia_collector_hs_port = hdp_default("ganglia_collector_hs_port","8666")
-
-  $oozie_server_port = hdp_default("oozie_server_port","11000")
-
-  $templeton_port = hdp_default("webhcat-site/templeton.port","50111")
-
-  $namenode_metadata_port = hdp_default("namenode_metadata_port","8020")
-
-  $changeUid_path = "/tmp/changeUid.sh"
-
-  file { $changeUid_path :
-    ensure => present,
-    source => "puppet:///modules/hdp/changeToSecureUid.sh",
-    mode => '0755'
-  }
-  
-  #TODO: probably not needed; there also seems to be a Puppet bug around this and LDAP
-  class { 'hdp::snmp': service_state => 'running'}
-
-  class { 'hdp::create_smoke_user': }
-
-  if ($pre_installed_pkgs != undef) {
-    class { 'hdp::pre_install_pkgs': }
-  }
-
-  #turns off selinux
-  class { 'hdp::set_selinux': }
-
-  if ($service_state != 'uninstalled') {
-    if ($hdp::params::lzo_enabled == true) {
-      @hdp::lzo::package{ 32:}
-      @hdp::lzo::package{ 64:}
-    }
-    if ($hdp::params::security_enabled) {
-      hdp::package{ 'unzip':
-        ensure       => 'present',
-        size         => $size,
-        java_needed  => false,
-        lzo_needed   => false
-      }
-    }
-  }
-
-  #TODO: treat consistently 
-  if ($service_state != 'uninstalled') {
-    if ($hdp::params::snappy_enabled == true) {
-      include hdp::snappy::package
-    }
-  }
-
-  Hdp::Package<|title == 'hadoop 32'|> ->   Hdp::Package<|title == 'hbase'|>
-  Hdp::Package<|title == 'hadoop 64'|> ->   Hdp::Package<|title == 'hbase'|>
-
-  hdp::package{ 'glibc':
-    ensure       => 'present',
-    size         => $size,
-    java_needed  => false,
-    lzo_needed   => false
-  }
-
-    anchor{'hdp::begin':}
-    anchor{'hdp::end':}
-
-    ## Create all users for all components present in the cluster
-    if ($hdp::params::hbase_master_hosts != "") {
-      class { 'hdp::create_hbase_user': }
-    }
-    
-    if ($hdp::params::nagios_server_host != "") {
-      hdp::group { 'nagios_group':
-        group_name => $hdp::params::nagios_group,
-      }
-
-      hdp::user{ 'nagios_user':
-        user_name => $hdp::params::nagios_user,
-        gid => $hdp::params::nagios_group
-      }
-
-      Anchor['hdp::begin'] -> Hdp::Group['nagios_group'] -> Hdp::User['nagios_user'] -> Anchor['hdp::end']
-    }
-
-    if ($hdp::params::oozie_server != "") {
-      hdp::user{ 'oozie_user':
-        user_name => $hdp::params::oozie_user
-      }
-
-      Anchor['hdp::begin'] -> Hdp::Group['hdp_user_group'] -> Hdp::User['oozie_user'] -> Anchor['hdp::end']  
-    }
-
-    if ($hdp::params::hcat_server_host != "") {
-      hdp::user{ 'webhcat_user':
-        user_name => $hdp::params::webhcat_user
-      }
-
-      if ($hdp::params::webhcat_user != $hdp::params::hcat_user) {
-        hdp::user { 'hcat_user':
-          user_name => $hdp::params::hcat_user
-        }
-      }
-
-      Anchor['hdp::begin'] -> Hdp::Group['hdp_user_group'] -> Hdp::User<|title == 'webhcat_user' or title == 'hcat_user'|> -> Anchor['hdp::end'] 
-    }
-
-    if ($hdp::params::hive_server_host != "") {
-      hdp::user{ 'hive_user':
-        user_name => $hdp::params::hive_user
-      }
-
-      Anchor['hdp::begin'] -> Hdp::Group['hdp_user_group'] -> Hdp::User['hive_user'] -> Anchor['hdp::end']  
-    }
-
-    if ($hdp::params::rm_host != "") {
-      hdp::user { 'yarn_user':
-        user_name => $hdp::params::yarn_user
-      }
-      
-      Anchor['hdp::begin'] -> Hdp::Group['hdp_user_group'] -> Hdp::User['yarn_user'] -> Anchor['hdp::end']
-    }
-
-}
-
-class hdp::pre_install_pkgs
-{
-
-  if ($service_state == 'installed_and_configured') {
-    hdp::exec{ 'yum install $pre_installed_pkgs':
-       command => "yum install -y $pre_installed_pkgs"
-    }
-  } elsif ($service_state == 'uninstalled') {
-    hdp::exec{ 'yum erase $pre_installed_pkgs':
-       command => "yum erase -y $pre_installed_pkgs"
-    }
-  }
-}
-
-class hdp::create_hbase_user()
-{
-  $hbase_user = $hdp::params::hbase_user
-
-  hdp::user{ 'hbase_user':
-    user_name => $hbase_user,
-    groups => [$hdp::params::user_group]
-  }
-
-  ## Set hbase user uid to > 1000
-  $cmd_set_hbase_uid_check = "test $(id -u ${hbase_user}) -gt 1000"
-  $hbase_user_dirs = "/home/${hbase_user},/tmp/${hbase_user},/usr/bin/${hbase_user}"
-
-  hdp::set_uid { 'set_hbase_user_uid':
-    user      => $hbase_user,
-    user_dirs => $hbase_user_dirs,
-    unless    => $cmd_set_hbase_uid_check
-  }
-
-  Group['hdp_user_group'] -> Hdp::User['hbase_user'] -> Hdp::Set_uid['set_hbase_user_uid']
-}
-
-class hdp::create_smoke_user()
-{
-
-  $smoke_group = $hdp::params::smoke_user_group
-  $smoke_user = $hdp::params::smokeuser
-  $security_enabled = $hdp::params::security_enabled
-
-  hdp::group { 'smoke_group':
-    group_name => $smoke_group,
-  }
-  
-  hdp::group { 'proxyuser_group':
-    group_name => $proxyuser_group,
-  }
-  
-  hdp::user { 'smoke_user':
-    user_name => $smoke_user,
-    gid    => $hdp::params::user_group,
-    groups => ["$proxyuser_group"]
-  }
-
-  ## Set smoke user uid to > 1000 to enable the security feature
-  $smoke_user_dirs = "/tmp/hadoop-${smoke_user},/tmp/hsperfdata_${smoke_user},/home/${smoke_user},/tmp/${smoke_user},/tmp/sqoop-${smoke_user}"
-  $cmd_set_uid_check = "test $(id -u ${smoke_user}) -gt 1000"
-
-  hdp::set_uid { 'set_smoke_user_uid':
-    user      => $smoke_user,
-    user_dirs => $smoke_user_dirs,
-    unless    => $cmd_set_uid_check
-  }
-
-  Hdp::Group<|title == 'smoke_group' or title == 'proxyuser_group'|> ->
-  Hdp::User['smoke_user'] -> Hdp::Set_uid['set_smoke_user_uid']
-}
-
-
-class hdp::set_selinux()
-{
- $cmd = "/bin/echo 0 > /selinux/enforce"
- hdp::exec{ $cmd:
-    command => $cmd,
-    unless => "head -n 1 /selinux/enforce | grep ^0$",
-    onlyif => "test -f /selinux/enforce"
- }
-}
-
-define hdp::group(
-  $group_name = undef
-)
-{
-  if($hdp::params::defined_groups[$group_name]!="defined"){
-    group { $name:
-      name => $group_name,
-      ensure => present   
-    }
-    
-    $hdp::params::defined_groups[$group_name] = "defined"
-  }
-}
-
-define hdp::user(
-  $user_name = undef,
-  $gid = $hdp::params::user_group,
-  $just_validate = undef,
-  $groups = undef,
-  $uid = undef
-)
-{
-  $user_info = $hdp::params::user_info[$user_name]
-  
-  if ($just_validate != undef) {
-    $just_val  = $just_validate
-  } elsif (($user_info == undef) or ("|${user_info}|" == '||')){ #tests for different versions of Puppet
-    $just_val = false
-  } else {
-    $just_val = $user_info[just_validate]
-  }
-  
-  if ($just_val == true) {
-    exec { "user ${name} exists":
-      command => "su - ${user_name} -c 'ls /dev/null' >/dev/null 2>&1",
-      path    => ['/bin']
-    }
-  } else {
-      if(!defined(User[$user_name])){
-        user { $user_name:
-          ensure     => present,
-          managehome => true,
-          gid        => $gid, #TODO either remove this to support LDAP env or fix it
-          shell      => '/bin/bash',
-          groups     => $groups,
-          uid        => $uid
-        }
-      } else {
-        User <| title == $user_name |> {
-          groups +> $groups
-        }
-      }
-  }
-}
-
-     
-define hdp::directory(
-  $owner = undef,
-  $group = $hdp::params::user_group,
-  $mode  = undef,
-  $ensure = directory,
-  $force = undef,
-  $links = 'follow',
-  $service_state = 'running',
-  $override_owner = false
-  )
-{
- if (($service_state == 'uninstalled') and ($wipeoff_data == true)) {
-  file { $name :
-    ensure => absent,
-    owner  => $owner,
-    group  => $group,
-    mode   => $mode,
-    links  => $links,
-    force  => $force
-   }
-  } elsif ($service_state != 'uninstalled') {
-    if $override_owner == true {
-      file { $name :
-      ensure => $ensure,
-      owner  => $owner,
-      group  => $group,
-      links  => $links,
-      mode   => $mode,
-      force  => $force
-     }
-    } else {
-      file { $name :
-      ensure => $ensure,
-      links  => $links,
-      mode   => $mode,
-      force  => $force
-     }
-    }
-  }
-}
-#TODO: check on -R flag and use of recurse
-define hdp::directory_recursive_create(
-  $owner = undef,
-  $group = $hdp::params::user_group,
-  $mode = undef,
-  $context_tag = undef,
-  $ensure = directory,
-  $force = undef,
-  $service_state = 'running',
-  $override_owner = true
-  )
-{
-
-  hdp::exec {"mkdir -p ${name}" :
-    command => "mkdir -p ${name}",
-    creates => $name
-  }
-  #to take care of setting ownership and mode
-  hdp::directory { $name :
-    owner => $owner,
-    group => $group,
-    mode  => $mode,
-    ensure => $ensure,
-    force => $force,
-    service_state => $service_state,
-    override_owner => $override_owner
-  }
-  Hdp::Exec["mkdir -p ${name}"] -> Hdp::Directory[$name]
-}
-
-define hdp::directory_recursive_create_ignore_failure(
-  $owner = undef,
-  $group = $hdp::params::user_group,
-  $mode = undef,
-  $context_tag = undef,
-  $ensure = directory,
-  $force = undef,
-  $service_state = 'running'
-  )
-{
-  hdp::exec {"mkdir -p ${name} ; exit 0" :
-    command => "mkdir -p ${name} ; exit 0",
-    creates => $name
-  }
-    hdp::exec {"chown ${owner}:${group} ${name}; exit 0" :
-    command => "chown ${owner}:${group} ${name}; exit 0"
-  }
-    hdp::exec {"chmod ${mode} ${name} ; exit 0" :
-    command => "chmod ${mode} ${name} ; exit 0"
-  }
-  Hdp::Exec["mkdir -p ${name} ; exit 0"] -> Hdp::Exec["chown ${owner}:${group} ${name}; exit 0"] -> Hdp::Exec["chmod ${mode} ${name} ; exit 0"]
-}
-
-### helper to do exec
-define hdp::exec(
-  $command,
-  $refreshonly = undef,
-  $unless = undef,
-  $onlyif = undef,
-  $path = $hdp::params::exec_path,
-  $user = undef,
-  $creates = undef,
-  $tries = 1,
-  $timeout = 300,
-  $try_sleep = undef,
-  $initial_wait = undef,
-  $logoutput = 'on_failure',
-  $cwd = undef
-)
-{
-
-  if (($initial_wait != undef) and ($initial_wait != "undef")) {
-    # passing in creates and unless so we don't have to wait if the condition has already been achieved
-    hdp::wait { "service ${name}" : 
-      wait_time => $initial_wait,
-      creates   => $creates,
-      unless    => $unless,
-      onlyif    => $onlyif,
-      path      => $path
-    }
-  }
-  
-  exec { $name :
-    command     => $command,
-    refreshonly => $refreshonly,
-    path        => $path,
-    user        => $user,
-    creates     => $creates,
-    unless      => $unless,
-    onlyif      => $onlyif,
-    tries       => $tries,
-    timeout     => $timeout,
-    try_sleep   => $try_sleep,
-    logoutput   => $logoutput,
-    cwd         => $cwd
-  }
-
-  hdp::java::package{ $name:}
-  
-  anchor{ "hdp::exec::${name}::begin":} -> Hdp::Java::Package[$name] -> Exec[$name] -> anchor{ "hdp::exec::${name}::end":}
-  if (($initial_wait != undef) and ($initial_wait != "undef")) {
-    Anchor["hdp::exec::${name}::begin"] -> Hdp::Wait["service ${name}"] -> Hdp::Java::Package[$name] -> Exec[$name]
-  }
-}
-
-#### utilities for waits
-define hdp::wait(
-  $wait_time,
-  $creates = undef,
-  $unless = undef,
-  $onlyif = undef,
-  $path = undef #used for unless
-)   
-{
-  exec { "wait ${name} ${wait_time}" :
-    command => "/bin/sleep ${wait_time}",
-    creates => $creates,
-    unless  => $unless,
-    onlyif  => $onlyif,
-    path    => $path
-  } 
-}
-
-define hdp::set_uid(
-  $user = undef,
-  $user_dirs = undef,
-  $unless = undef
-)
-{
-  $cmd_set_uid = "/tmp/changeUid.sh ${user} ${user_dirs} 2>/dev/null"
-
-  hdp::exec{ $cmd_set_uid:
-    command => $cmd_set_uid,
-    unless  => $unless
-  }
-}
-
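
For reference, the removed init.pp above carried the module's generic
building blocks (hdp::user, hdp::group, hdp::exec, hdp::wait, hdp::set_uid).
A minimal sketch of how they composed -- the resource titles, user name and
path below are hypothetical, not taken from the codebase:

    # Create a service user in the shared group, then run a guarded
    # command.  "creates" keeps the exec idempotent, and initial_wait
    # routes through the hdp::wait define above before the exec runs.
    hdp::user { 'demo_user':                   # hypothetical title
      user_name => 'demo',
      gid       => $hdp::params::user_group
    }

    hdp::exec { 'demo-init':                   # hypothetical title
      command      => 'mkdir -p /var/run/demo',
      creates      => '/var/run/demo',
      initial_wait => 10
    }

    Hdp::User['demo_user'] -> Hdp::Exec['demo-init']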

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/manifests/java/jce/package.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/java/jce/package.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/java/jce/package.pp
deleted file mode 100644
index 41b8bc9..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/java/jce/package.pp
+++ /dev/null
@@ -1,62 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-define hdp::java::jce::package(
-  $java_home_dir, $jdk_location, $jdk_bin
-)
-{
-  include hdp::params
-
-  $jce_policy_zip = $hdp::params::jce_policy_zip
-  $artifact_dir = $hdp::params::artifact_dir
-  $jce_curl_target = "${artifact_dir}/${jce_policy_zip}"
-
-  if $jdk_location != '' and $jdk_bin != ''  {
-    $jce_location = regsubst($jdk_location, $jdk_bin, '')
-  } else {
-    $jce_location = $hdp::params::jce_location
-  }
-
-  #TODO: SUHAS - how to avoid re-downloading and installing if the correct version is already present.
-  # Maybe check the file sizes of the local_policy and US_export_policy jars?
-  # UNLESS  => "test -e ${java_exec}"
-  # curl -k ignores an unverified server ssl certificate
-  $curl_cmd = "mkdir -p ${artifact_dir}; curl -kf --retry 10 ${jce_location}/${jce_policy_zip} -o ${jce_curl_target}; echo 0"
-  exec{ "jce-download ${name}":
-    command => $curl_cmd,
-    creates => $jce_curl_target,
-    path    => ["/bin","/usr/bin/"],
-    unless => "test -e ${jce_curl_target}"
-  }
-
-  $security_dir = "${java_home_dir}/jre/lib/security"
-  $cmd = "rm -f local_policy.jar; rm -f US_export_policy.jar; unzip -o -j -q ${jce_curl_target}" 
-  exec { "jce-install ${name}":
-    command => $cmd,
-    onlyif  => "test -e ${security_dir} && test -f ${jce_curl_target}",
-    cwd     => $security_dir,
-    path    => ['/bin/','/usr/bin']
-  }
-
-  #TODO: SUHAS add ensure code to check local and us export policy files exist -> File["${java_exec} ${name}"]
-
-  anchor{"hdp::java::jce::package::${name}::begin":} -> Exec["jce-download ${name}"] ->  Exec["jce-install ${name}"] -> anchor{"hdp::java::jce::package::${name}::end":}
-}
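
For reference, hdp::java::jce::package downloads the JCE policy zip once
(guarded by creates/unless) and unzips it into ${java_home_dir}/jre/lib/security.
A sketch of an invocation -- all values below are placeholders:

    hdp::java::jce::package { 'jdk64':
      java_home_dir => '/usr/jdk64/jdk1.6.0_31',           # placeholder
      jdk_location  => 'http://repo.example.com/artifacts', # placeholder
      jdk_bin       => 'jdk-6u31-linux-x64.bin'            # placeholder
    }

When jdk_location and jdk_bin are both non-empty, the define derives
$jce_location by deleting $jdk_bin from $jdk_location via regsubst (a no-op
when the location is already a bare directory URL); otherwise it falls back
to $hdp::params::jce_location.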

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/manifests/java/package.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/java/package.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/java/package.pp
deleted file mode 100644
index 1b748f6..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/java/package.pp
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp::java::package()
-{
-    
-  include hdp::params
-  
-  $security_enabled = $hdp::params::security_enabled
-  $jdk_bin = $hdp::params::jdk_name
-  $artifact_dir = $hdp::params::artifact_dir
-  $jdk_location = $hdp::params::jdk_location
-  $jdk_curl_target = "${artifact_dir}/${jdk_bin}"
- 
-  $java_home = $hdp::params::java64_home
-  $java_exec = "${java_home}/bin/java"
-  $java_dir = regsubst($java_home,'/[^/]+$','')
-
-  if ($jdk_bin != "") {
-    # curl -k ignores an unverified server ssl certificate
-    $curl_cmd = "mkdir -p ${artifact_dir} ; curl -kf --retry 10 ${jdk_location}/${jdk_bin} -o ${jdk_curl_target}"
-    exec{ "${curl_cmd} ${name}":
-      command => $curl_cmd,
-      creates => $jdk_curl_target,
-      path    => ["/bin","/usr/bin/"],
-      unless  => "test -e ${java_exec}"
-    }
-
-    if (hdp_str_ends_with($jdk_bin, ".bin")) {
-      $install_cmd = "mkdir -p ${java_dir} ; chmod +x ${jdk_curl_target}; cd ${java_dir} ; echo A | ${jdk_curl_target} -noregister > /dev/null 2>&1"
-    }
-    elsif (hdp_str_ends_with($jdk_bin, ".gz")) {
-      $install_cmd = "mkdir -p ${java_dir} ; cd ${java_dir} ; tar -xf ${jdk_curl_target} > /dev/null 2>&1"
-    }
-
-    exec{ "${install_cmd} ${name}":
-      command => $install_cmd,
-      unless  => "test -e ${java_exec}",
-      path    => ["/bin","/usr/bin/"]
-    }
-
-    exec{ "${java_exec} ${name}":
-      command => "test -e ${java_exec}",
-      path    => ["/bin","/usr/bin/"]
-    }
-
-    if ($security_enabled == true) {
-      hdp::java::jce::package{ $name:
-        java_home_dir  => $java_home,
-        jdk_location => $jdk_location,
-        jdk_bin => $jdk_bin
-      }
-    }
-
-    anchor{"hdp::java::package::${name}::begin":} -> Exec["${curl_cmd} ${name}"] ->  Exec["${install_cmd} ${name}"] -> Exec["${java_exec} ${name}"] ->  anchor{"hdp::java::package::${name}::end":}
-    if ($security_enabled == true) {
-      Exec["${java_exec} ${name}"] -> Hdp::Java::Jce::Package[$name]
-   }
- }
-}
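
hdp::java::package keys each Exec off $name, so every call site needs a
unique title; the JDK binary, download location and install root all come
from hdp::params.  A sketch of an invocation (hypothetical title):

    # Downloads ${jdk_location}/${jdk_bin} once, then installs it either
    # as a .bin self-extractor or a .gz tarball, both steps guarded by
    # "test -e ${java_exec}".
    hdp::java::package { 'namenode': }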

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/manifests/lzo/package.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/lzo/package.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/lzo/package.pp
deleted file mode 100644
index fe5a764..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/lzo/package.pp
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp::lzo::package()
-{
-  $size = $name
-
-  $pkg_type = "lzo"
-
-  hdp::package {"lzo ${size}":
-    package_type  => "${pkg_type}",
-    size          => $size,
-    java_needed   => false
-  }
-
-  $anchor_beg = "hdp::lzo::package::${size}::begin"
-  $anchor_end = "hdp::lzo::package::${size}::end"
-  anchor{$anchor_beg:} ->  Hdp::Package["lzo ${size}"] -> anchor{$anchor_end:}
-}
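
The lzo packages were declared virtually, one per word size (see the removed
init.pp above), and realized only by the consumers that needed them.  A
sketch of the pattern:

    @hdp::lzo::package { 64: }             # declare, but do not apply yet
    Hdp::Lzo::Package <| title == 64 |>    # realize the 64-bit variant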

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/manifests/namenode-conn.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/namenode-conn.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/namenode-conn.pp
deleted file mode 100644
index 268e213..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/namenode-conn.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp::namenode-conn($namenode_host)
-{
-  Hdp::Configfile<||>{namenode_host => $namenode_host}
-}
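
hdp::namenode-conn is a one-liner that stamps namenode_host onto every
hdp::configfile resource in the catalog via a collector override.  A sketch
with a placeholder host:

    class { 'hdp::namenode-conn':
      namenode_host => 'nn1.example.com'   # placeholder
    }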

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp/manifests/package.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/package.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/package.pp
deleted file mode 100644
index 0ed383e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/package.pp
+++ /dev/null
@@ -1,156 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp::package(
-  $ensure = present,
-  $package_type = undef,
-  $size = 64,
-  $java_needed = true,
-  $lzo_needed = false
-  )
-{
-
-  $pt = $package_type ? {
-    undef  => $name,
-    default  => $package_type
-  }
-  
-  hdp::package::process_pkg { $name:
-    ensure       => $ensure,
-    package_type => $pt,
-    size         => $size,
-    java_needed  => $java_needed,
-    lzo_needed   => $lzo_needed
-  }
-}
-
-define hdp::package::process_pkg(
-  $ensure = present,
-  $package_type,
-  $size,
-  $java_needed,
-  $lzo_needed
-  )
-{
-
-  $stack_version = $hdp::params::stack_version
-    
-
-  debug("##Processing package:  $ensure,$package_type,$size,$java_needed,$lzo_needed")
-
-  include hdp::params
-
-  if hdp_is_empty($hdp::params::package_names[$package_type]) {
-    hdp_fail("No packages for $package_type")
-  }
-  
-  ## Process packages depending on stack
-  if hdp_is_empty($hdp::params::package_names[$package_type][$stack_version]) {
-
-    if hdp_is_empty($hdp::params::package_names[$package_type][ALL]) {
-      hdp_fail("No packages for $package_type")
-    }
-    else {
-      $packages_list_by_stack = $hdp::params::package_names[$package_type][ALL]
-    }
-  }
-  else {
-    $packages_list_by_stack = $hdp::params::package_names[$package_type][$stack_version]
-  }
-  
-  debug("##Pkg for stack: $packages_list_by_stack")
-  
-  ## Process packages depending on arch
-  if hdp_is_empty($packages_list_by_stack[$size]) {
-
-    if hdp_is_empty($packages_list_by_stack[ALL]) {
-      hdp_fail("No packages for $package_type")
-    }
-    else {
-      $packages_list_by_size = $packages_list_by_stack[ALL]
-    }
-  }
-  else {
-    $packages_list_by_size = $packages_list_by_stack[$size]
-
-  }
-  
-  debug("##Pkg for arch: $packages_list_by_size")
-  
-  ## Process packages depending on os
-  if hdp_is_empty($packages_list_by_size[$hdp::params::hdp_os_type]) {
-
-    if hdp_is_empty($packages_list_by_size[ALL]) {
-      hdp_fail("No packages for $package_type")
-    }
-    else {
-      $packages_list = $packages_list_by_size[ALL]
-    }
-  }
-  else {
-    $packages_list = $packages_list_by_size[$hdp::params::hdp_os_type]
-  }
-
-  debug("##Packages list: $packages_list")
-
-  if (($java_needed == true) and ($ensure == 'present')){
-    hdp::java::package{ $name:}
-  }
-
-  if (($lzo_needed == true) and ($ensure == 'present')){
-    Hdp::Lzo::Package<|title == $size|>
-  }
-
-  if ($ensure == 'uninstalled') {
-    $ensure_actual = 'purged'
-  } else {
-    $ensure_actual = $ensure
-  }
-  $tag = regsubst($name,' ','-',G)
-  if $packages_list != $hdp::params::NOTHING {
-    package{ $packages_list:
-      ensure   => $ensure_actual,
-      tag      => $tag
-    }
-  }
-  anchor{ "hdp::package::${name}::begin": } -> Package<|tag == $tag|> -> anchor{ "hdp::package::${name}::end": }
-  
-  if (($java_needed == true) and ($ensure == 'present')) {
-   Anchor["hdp::package::${name}::begin"] -> Hdp::Java::Package[$name] -> Anchor["hdp::package::${name}::end"] 
-  }
-}
-
-# Removes the specified package using a shell command appropriate for the current OS type.
-# This define DOES NOT resolve the package name via hdp::params.
-# If the package does not exist or is not installed, the command does nothing.
-define hdp::package::remove_pkg(
-    $package_type,
-  )
-{
-
-  # TODO: For non-rpm based systems, provide appropriate command
-  exec { "remove_package ${package_type}":
-    path    => "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-    command => $hdp::params::hdp_os_type ? {
-      default => "rpm -e --allmatches ${package_type} ; true"
-    },
-  }
-
-}
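
hdp::package::process_pkg resolves the concrete package list by falling back
from the specific key to ALL at three levels in turn: stack version, then
32/64 size, then OS type.  A sketch of the shape $package_names must
therefore have -- the package and OS names below are hypothetical:

    $package_names = {
      snappy => {
        ALL => {                             # any stack version
          64 => {                            # 64-bit hosts only
            suse => ['libsnappy1'],          # OS-specific list
            ALL  => ['snappy', 'snappy-devel']
          }
        }
      }
    }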


http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.cfg.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.cfg.erb
deleted file mode 100644
index d5b6a7b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.cfg.erb
+++ /dev/null
@@ -1,1349 +0,0 @@
-##############################################################################
-#
-# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
-#
-# Read the documentation for more information on this configuration
-# file.  I've provided some comments here, but things may not be so
-# clear without further explanation.
-#
-# Last Modified: 12-14-2008
-#
-##############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-# LOG FILE
-# This is the main log file where service and host events are logged
-# for historical purposes.  This should be the first option specified 
-# in the config file!!!
-
-log_file=/var/log/nagios/nagios.log
-
-
-
-# OBJECT CONFIGURATION FILE(S)
-# These are the object configuration files in which you define hosts,
-# host groups, contacts, contact groups, services, etc.
-# You can split your object definitions across several config files
-# if you wish (as shown below), or keep them all in a single config file.
-
-# You can specify individual object config files as shown below:
-cfg_file=/etc/nagios/objects/commands.cfg
-cfg_file=/etc/nagios/objects/contacts.cfg
-cfg_file=/etc/nagios/objects/timeperiods.cfg
-cfg_file=/etc/nagios/objects/templates.cfg
-
-# Definitions for monitoring the local (Linux) host
-#cfg_file=/etc/nagios/objects/localhost.cfg
-
-# Definitions for monitoring a Windows machine
-#cfg_file=/etc/nagios/objects/windows.cfg
-
-# Definitions for monitoring a router/switch
-#cfg_file=/etc/nagios/objects/switch.cfg
-
-# Definitions for monitoring a network printer
-#cfg_file=/etc/nagios/objects/printer.cfg
-
-# Definitions for hadoop servers
-cfg_file=<%=scope.function_hdp_template_var("nagios_host_cfg")%>
-cfg_file=<%=scope.function_hdp_template_var("nagios_hostgroup_cfg")%>
-cfg_file=<%=scope.function_hdp_template_var("nagios_servicegroup_cfg")%>
-cfg_file=<%=scope.function_hdp_template_var("nagios_service_cfg")%>
-cfg_file=<%=scope.function_hdp_template_var("nagios_command_cfg")%>
-
-
-# You can also tell Nagios to process all config files (with a .cfg
-# extension) in a particular directory by using the cfg_dir
-# directive as shown below:
-
-#cfg_dir=/etc/nagios/servers
-#cfg_dir=/etc/nagios/printers
-#cfg_dir=/etc/nagios/switches
-#cfg_dir=/etc/nagios/routers
-
-
-
-
-# OBJECT CACHE FILE
-# This option determines where object definitions are cached when
-# Nagios starts/restarts.  The CGIs read object definitions from 
-# this cache file (rather than looking at the object config files
-# directly) in order to prevent inconsistencies that can occur
-# when the config files are modified after Nagios starts.
-
-object_cache_file=/var/nagios/objects.cache
-
-
-
-# PRE-CACHED OBJECT FILE
-# This option determines the location of the precached object file.
-# If you run Nagios with the -p command line option, it will preprocess
-# your object configuration file(s) and write the cached config to this
-# file.  You can then start Nagios with the -u option to have it read
-# object definitions from this precached file, rather than the standard
-# object configuration files (see the cfg_file and cfg_dir options above).
-# Using a precached object file can speed up the time needed to (re)start 
-# the Nagios process if you've got a large and/or complex configuration.
-# Read the documentation section on optimizing Nagios to find out more
-# about how this feature works.
-
-precached_object_file=/var/nagios/objects.precache
-
-
-
-# RESOURCE FILE
-# This is an optional resource file that contains $USERx$ macro
-# definitions. Multiple resource files can be specified by using
-# multiple resource_file definitions.  The CGIs will not attempt to
-# read the contents of resource files, so information that is
-# considered to be sensitive (usernames, passwords, etc) can be
-# defined as macros in this file and restrictive permissions (600)
-# can be placed on this file.
-
-resource_file=<%=scope.function_hdp_template_var("nagios_resource_cfg")%>
-
-
-
-# STATUS FILE
-# This is where the current status of all monitored services and
-# hosts is stored.  Its contents are read and processed by the CGIs.
-# The contents of the status file are deleted every time Nagios
-#  restarts.
-
-status_file=/var/nagios/status.dat
-
-
-
-# STATUS FILE UPDATE INTERVAL
-# This option determines the frequency (in seconds) that
-# Nagios will periodically dump program, host, and 
-# service status data.
-
-status_update_interval=10
-
-
-
-# NAGIOS USER
-# This determines the effective user that Nagios should run as.  
-# You can either supply a username or a UID.
-
-nagios_user=<%=scope.function_hdp_template_var("nagios_user")%>
-
-
-
-# NAGIOS GROUP
-# This determines the effective group that Nagios should run as.  
-# You can either supply a group name or a GID.
-
-nagios_group=<%=scope.function_hdp_template_var("nagios_group")%>
-
-
-
-# EXTERNAL COMMAND OPTION
-# This option allows you to specify whether or not Nagios should check
-# for external commands (in the command file defined below).  By default
-# Nagios will *not* check for external commands, just to be on the
-# cautious side.  If you want to be able to use the CGI command interface
-# you will have to enable this.
-# Values: 0 = disable commands, 1 = enable commands
-
-check_external_commands=1
-
-
-
-# EXTERNAL COMMAND CHECK INTERVAL
-# This is the interval at which Nagios should check for external commands.
-# This value works off the interval_length you specify later.  If you leave
-# that at its default value of 60 (seconds), a value of 1 here will cause
-# Nagios to check for external commands every minute.  If you specify a
-# number followed by an "s" (i.e. 15s), this will be interpreted to mean
-# actual seconds rather than a multiple of the interval_length variable.
-# Note: In addition to reading the external command file at regularly 
-# scheduled intervals, Nagios will also check for external commands after
-# event handlers are executed.
-# NOTE: Setting this value to -1 causes Nagios to check the external
-# command file as often as possible.
-
-#command_check_interval=15s
-command_check_interval=-1
-
-
-
-# EXTERNAL COMMAND FILE
-# This is the file that Nagios checks for external command requests.
-# It is also where the command CGI will write commands that are submitted
-# by users, so it must be writeable by the user that the web server
-# is running as (usually 'nobody').  Permissions should be set at the 
-# directory level instead of on the file, as the file is deleted every
-# time its contents are processed.
-
-command_file=/var/nagios/rw/nagios.cmd
-
-
-
-# EXTERNAL COMMAND BUFFER SLOTS
-# This setting is used to tweak the number of items or "slots" that
-# the Nagios daemon should allocate to the buffer that holds incoming 
-# external commands before they are processed.  As external commands 
-# are processed by the daemon, they are removed from the buffer.  
-
-external_command_buffer_slots=4096
-
-
-
-# LOCK FILE
-# This is the lockfile that Nagios will use to store its PID number
-# in when it is running in daemon mode.
-
-lock_file=<%=scope.function_hdp_template_var("nagios_pid_file")%>
-
-
-
-# TEMP FILE
-# This is a temporary file that is used as scratch space when Nagios
-# updates the status log, cleans the comment file, etc.  This file
-# is created, used, and deleted throughout the time that Nagios is
-# running.
-
-temp_file=/var/nagios/nagios.tmp
-
-
-
-# TEMP PATH
-# This is path where Nagios can create temp files for service and
-# host check results, etc.
-
-temp_path=/tmp
-
-
-
-# EVENT BROKER OPTIONS
-# Controls what (if any) data gets sent to the event broker.
-# Values:  0      = Broker nothing
-#         -1      = Broker everything
-#         <other> = See documentation
-
-event_broker_options=-1
-
-
-
-# EVENT BROKER MODULE(S)
-# This directive is used to specify an event broker module that should
-# be loaded by Nagios at startup.  Use multiple directives if you want
-# to load more than one module.  Arguments that should be passed to
-# the module at startup are separated from the module path by a space.
-#
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-#
-# Do NOT overwrite modules while they are being used by Nagios or Nagios
-# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
-# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
-#
-# The correct/safe way of updating a module is by using one of these methods:
-#    1. Shutdown Nagios, replace the module file, restart Nagios
-#    2. Delete the original module file, move the new module file into place, restart Nagios
-#
-# Example:
-#
-#   broker_module=<modulepath> [moduleargs]
-
-#broker_module=/somewhere/module1.o
-#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
-
-
-
-# LOG ROTATION METHOD
-# This is the log rotation method that Nagios should use to rotate
-# the main log file. Values are as follows..
-#	n	= None - don't rotate the log
-#	h	= Hourly rotation (top of the hour)
-#	d	= Daily rotation (midnight every day)
-#	w	= Weekly rotation (midnight on Saturday evening)
-#	m	= Monthly rotation (midnight last day of month)
-
-log_rotation_method=d
-
-
-
-# LOG ARCHIVE PATH
-# This is the directory where archived (rotated) log files should be 
-# placed (assuming you've chosen to do log rotation).
-
-log_archive_path=/var/log/nagios/archives
-
-
-
-# LOGGING OPTIONS
-# If you want messages logged to the syslog facility, as well as the
-# Nagios log file set this option to 1.  If not, set it to 0.
-
-use_syslog=1
-
-
-
-# NOTIFICATION LOGGING OPTION
-# If you don't want notifications to be logged, set this value to 0.
-# If notifications should be logged, set the value to 1.
-
-log_notifications=1
-
-
-
-# SERVICE RETRY LOGGING OPTION
-# If you don't want service check retries to be logged, set this value
-# to 0.  If retries should be logged, set the value to 1.
-
-log_service_retries=1
-
-
-
-# HOST RETRY LOGGING OPTION
-# If you don't want host check retries to be logged, set this value to
-# 0.  If retries should be logged, set the value to 1.
-
-log_host_retries=1
-
-
-
-# EVENT HANDLER LOGGING OPTION
-# If you don't want host and service event handlers to be logged, set
-# this value to 0.  If event handlers should be logged, set the value
-# to 1.
-
-log_event_handlers=1
-
-
-
-# INITIAL STATES LOGGING OPTION
-# If you want Nagios to log all initial host and service states to
-# the main log file (the first time the service or host is checked)
-# you can enable this option by setting this value to 1.  If you
-# are not using an external application that does long term state
-# statistics reporting, you do not need to enable this option.  In
-# this case, set the value to 0.
-
-log_initial_states=0
-
-
-
-# EXTERNAL COMMANDS LOGGING OPTION
-# If you don't want Nagios to log external commands, set this value
-# to 0.  If external commands should be logged, set this value to 1.
-# Note: This option does not include logging of passive service
-# checks - see the option below for controlling whether or not
-# passive checks are logged.
-
-log_external_commands=1
-
-
-
-# PASSIVE CHECKS LOGGING OPTION
-# If you don't want Nagios to log passive host and service checks, set
-# this value to 0.  If passive checks should be logged, set
-# this value to 1.
-
-log_passive_checks=1
-
-
-
-# GLOBAL HOST AND SERVICE EVENT HANDLERS
-# These options allow you to specify a host and service event handler
-# command that is to be run for every host or service state change.
-# The global event handler is executed immediately prior to the event
-# handler that you have optionally specified in each host or
-# service definition. The command argument is the short name of a
-# command definition that you define in your host configuration file.
-# Read the HTML docs for more information.
-
-#global_host_event_handler=somecommand
-#global_service_event_handler=somecommand
-
-
-
-# SERVICE INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" service checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all service checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!  This is not a
-# good thing for production, but is useful when testing the
-# parallelization functionality.
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-service_inter_check_delay_method=s
-
-
-
-# MAXIMUM SERVICE CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all services should
-# be completed.  Default is 30 minutes.
-
-max_service_check_spread=30
-
-
-
-# SERVICE CHECK INTERLEAVE FACTOR
-# This variable determines how service checks are interleaved.
-# Interleaving the service checks allows for a more even
-# distribution of service checks and reduced load on remote
-# hosts.  Setting this value to 1 is equivalent to how versions
-# of Nagios previous to 0.0.5 did service checks.  Set this
-# value to s (smart) for automatic calculation of the interleave
-# factor unless you have a specific reason to change it.
-#       s       = Use "smart" interleave factor calculation
-#       x       = Use an interleave factor of x, where x is a
-#                 number greater than or equal to 1.
-
-service_interleave_factor=s
-
-
-
-# HOST INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" host checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all host checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-host_inter_check_delay_method=s
-
-
-
-# MAXIMUM HOST CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all hosts should
-# be completed.  Default is 30 minutes.
-
-max_host_check_spread=30
-
-
-
-# MAXIMUM CONCURRENT SERVICE CHECKS
-# This option allows you to specify the maximum number of 
-# service checks that can be run in parallel at any given time.
-# Specifying a value of 1 for this variable essentially prevents
-# any service checks from being parallelized.  A value of 0
-# will not restrict the number of concurrent checks that are
-# being executed.
-
-max_concurrent_checks=0
-
-
-
-# HOST AND SERVICE CHECK REAPER FREQUENCY
-# This is the frequency (in seconds!) that Nagios will process
-# the results of host and service checks.
-
-check_result_reaper_frequency=10
-
-
-
-
-# MAX CHECK RESULT REAPER TIME
-# This is the max amount of time (in seconds) that  a single
-# check result reaper event will be allowed to run before 
-# returning control back to Nagios so it can perform other
-# duties.
-
-max_check_result_reaper_time=30
-
-
-
-
-# CHECK RESULT PATH
-# This is directory where Nagios stores the results of host and
-# service checks that have not yet been processed.
-#
-# Note: Make sure that only one instance of Nagios has access
-# to this directory!  
-
-check_result_path=/var/nagios/spool/checkresults
-
-
-
-
-# MAX CHECK RESULT FILE AGE
-# This option determines the maximum age (in seconds) which check
-# result files are considered to be valid.  Files older than this 
-# threshold will be mercilessly deleted without further processing.
-
-max_check_result_file_age=3600
-
-
-
-
-# CACHED HOST CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous host check is considered current.
-# Cached host states (from host checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to the host check logic.
-# Too high of a value for this option may result in inaccurate host
-# states being used by Nagios, while a lower value may result in a
-# performance hit for host checks.  Use a value of 0 to disable host
-# check caching.
-
-cached_host_check_horizon=15
-
-
-
-# CACHED SERVICE CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous service check is considered current.
-# Cached service states (from service checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to predictive dependency checks.
-# Use a value of 0 to disable service check caching.
-
-cached_service_check_horizon=15
-
-
-
-# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of hosts when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# host dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_host_dependency_checks=1
-
-
-
-# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of services when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# service dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_service_dependency_checks=1
-
-
-
-# SOFT STATE DEPENDENCIES
-# This option determines whether or not Nagios will use soft state 
-# information when checking host and service dependencies. Normally 
-# Nagios will only use the latest hard host or service state when 
-# checking dependencies. If you want it to use the latest state (regardless
-# of whether it's a soft or hard state type), enable this option. 
-# Values:
-#  0 = Don't use soft state dependencies (default) 
-#  1 = Use soft state dependencies 
-
-soft_state_dependencies=0
-
-
-
-# TIME CHANGE ADJUSTMENT THRESHOLDS
-# These options determine when Nagios will react to detected changes
-# in system time (either forward or backwards).
-
-#time_change_threshold=900
-
-
-
-# AUTO-RESCHEDULING OPTION
-# This option determines whether or not Nagios will attempt to
-# automatically reschedule active host and service checks to
-# "smooth" them out over time.  This can help balance the load on
-# the monitoring server.  
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_reschedule_checks=0
-
-
-
-# AUTO-RESCHEDULING INTERVAL
-# This option determines how often (in seconds) Nagios will
-# attempt to automatically reschedule checks.  This option only
-# has an effect if the auto_reschedule_checks option is enabled.
-# Default is 30 seconds.
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_interval=30
-
-
-
-# AUTO-RESCHEDULING WINDOW
-# This option determines the "window" of time (in seconds) that
-# Nagios will look at when automatically rescheduling checks.
-# Only host and service checks that occur in the next X seconds
-# (determined by this variable) will be rescheduled. This option
-# only has an effect if the auto_reschedule_checks option is
-# enabled.  Default is 180 seconds (3 minutes).
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_window=180
-
-
-
-# SLEEP TIME
-# This is the number of seconds to sleep between checking for system
-# events and service checks that need to be run.
-
-sleep_time=0.25
-
-
-
-# TIMEOUT VALUES
-# These options control how much time Nagios will allow various
-# types of commands to execute before killing them off.  Options
-# are available for controlling maximum time allotted for
-# service checks, host checks, event handlers, notifications, the
-# ocsp command, and performance data commands.  All values are in
-# seconds.
-
-service_check_timeout=60
-host_check_timeout=30
-event_handler_timeout=30
-notification_timeout=30
-ocsp_timeout=5
-perfdata_timeout=5
-
-
-
-# RETAIN STATE INFORMATION
-# This setting determines whether or not Nagios will save state
-# information for services and hosts before it shuts down.  Upon
-# startup Nagios will reload all saved service and host state
-# information before starting to monitor.  This is useful for 
-# maintaining long-term data on state statistics, etc, but will
-# slow Nagios down a bit when it (re)starts.  Since it's only
-# a one-time penalty, I think it's well worth the additional
-# startup delay.
-
-retain_state_information=1
-
-
-
-# STATE RETENTION FILE
-# This is the file that Nagios should use to store host and
-# service state information before it shuts down.  The state 
-# information in this file is also read immediately prior to
-# starting to monitor the network when Nagios is restarted.
-# This file is used only if the retain_state_information
-# variable is set to 1.
-
-state_retention_file=/var/nagios/retention.dat
-
-
-
-# RETENTION DATA UPDATE INTERVAL
-# This setting determines how often (in minutes) Nagios
-# will automatically save retention data during normal operation.
-# If you set this value to 0, Nagios will not save retention
-# data at regular intervals, but it will still save retention
-# data before shutting down or restarting.  If you have disabled
-# state retention, this option has no effect.
-
-retention_update_interval=60
-
-
-
-# USE RETAINED PROGRAM STATE
-# This setting determines whether or not Nagios will set 
-# program status variables based on the values saved in the
-# retention file.  If you want to use retained program status
-# information, set this value to 1.  If not, set this value
-# to 0.
-
-use_retained_program_state=1
-
-
-
-# USE RETAINED SCHEDULING INFO
-# This setting determines whether or not Nagios will retain
-# the scheduling info (next check time) for hosts and services
-# based on the values saved in the retention file.
-# If you want to use retained scheduling info, set this
-# value to 1.  If not, set this value to 0.
-
-use_retained_scheduling_info=1
-
-
-
-# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
-# The following variables are used to specify specific host and
-# service attributes that should *not* be retained by Nagios during
-# program restarts.
-#
-# The values of the masks are bitwise ANDs of values specified
-# by the "MODATTR_" definitions found in include/common.h.  
-# For example, if you do not want the current enabled/disabled state
-# of flap detection and event handlers for hosts to be retained, you
-# would use a value of 24 for the host attribute mask...
-# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
-
-# This mask determines what host attributes are not retained
-retained_host_attribute_mask=0
-
-# This mask determines what service attributes are not retained
-retained_service_attribute_mask=0
-
-# These two masks determine what process attributes are not retained.
-# There are two masks, because some process attributes have host and service
-# options.  For example, you can disable active host checks, but leave active
-# service checks enabled.
-retained_process_host_attribute_mask=0
-retained_process_service_attribute_mask=0
-
-# These two masks determine what contact attributes are not retained.
-# There are two masks, because some contact attributes have host and
-# service options.  For example, you can disable host notifications for
-# a contact, but leave service notifications enabled for them.
-retained_contact_host_attribute_mask=0
-retained_contact_service_attribute_mask=0
-
-
-
-# INTERVAL LENGTH
-# This is the seconds per unit interval as used in the
-# host/contact/service configuration files.  Setting this to 60 means
-# that each interval is one minute long (60 seconds).  Other settings
-# have not been tested much, so your mileage is likely to vary...
-
-interval_length=60
-
-
-
-# CHECK FOR UPDATES
-# This option determines whether Nagios will automatically check to
-# see if new updates (releases) are available.  It is recommended that you
-# enable this option to ensure that you stay on top of the latest critical
-# patches to Nagios.  Nagios is critical to you - make sure you keep it in
-# good shape.  Nagios will check once a day for new updates. Data collected
-# by Nagios Enterprises from the update check is processed in accordance 
-# with our privacy policy - see http://api.nagios.org for details.
-
-check_for_updates=1
-
-
-
-# BARE UPDATE CHECK
-# This option determines what data Nagios will send to api.nagios.org when
-# it checks for updates.  By default, Nagios will send information on the 
-# current version of Nagios you have installed, as well as an indicator as
-# to whether this was a new installation or not.  Nagios Enterprises uses
-# this data to determine the number of users running specific versions of 
-# Nagios.  Enable this option if you do not want this information to be sent.
-
-bare_update_check=0
-
-
-
-# AGGRESSIVE HOST CHECKING OPTION
-# If you don't want to turn on aggressive host checking features, set
-# this value to 0 (the default).  Otherwise set this value to 1 to
-# enable the aggressive check option.  Read the docs for more info
-# on what aggressive host check is or check out the source code in
-# base/checks.c
-
-use_aggressive_host_checking=0
-
-
-
-# SERVICE CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# service checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of service checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_service_checks=1
-
-
-
-# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# service checks results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_service_checks=1
-
-
-
-# HOST CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# host checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of host checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_host_checks=1
-
-
-
-# PASSIVE HOST CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# host checks results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_host_checks=1
-
-
-
-# NOTIFICATIONS OPTION
-# This determines whether or not Nagios will sent out any host or
-# service notifications when it is initially (re)started.
-# Values: 1 = enable notifications, 0 = disable notifications
-
-enable_notifications=1
-
-
-
-# EVENT HANDLER USE OPTION
-# This determines whether or not Nagios will run any host or
-# service event handlers when it is initially (re)started.  Unless
-# you're implementing redundant hosts, leave this option enabled.
-# Values: 1 = enable event handlers, 0 = disable event handlers
-
-enable_event_handlers=1
-
-
-
-# PROCESS PERFORMANCE DATA OPTION
-# This determines whether or not Nagios will process performance
-# data returned from service and host checks.  If this option is
-# enabled, host performance data will be processed using the
-# host_perfdata_command (defined below) and service performance
-# data will be processed using the service_perfdata_command (also
-# defined below).  Read the HTML docs for more information on
-# performance data.
-# Values: 1 = process performance data, 0 = do not process performance data
-
-process_performance_data=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
-# These commands are run after every host and service check is
-# performed.  These commands are executed only if the
-# enable_performance_data option (above) is set to 1.  The command
-# argument is the short name of a command definition that you 
-# define in your host configuration file.  Read the HTML docs for
-# more information on performance data.
-
-#host_perfdata_command=process-host-perfdata
-#service_perfdata_command=process-service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILES
-# These files are used to store host and service performance data.
-# Performance data is only written to these files if the
-# process_performance_data option (above) is set to 1.
-
-#host_perfdata_file=/tmp/host-perfdata
-#service_perfdata_file=/tmp/service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
-# These options determine what data is written (and how) to the
-# performance data files.  The templates may contain macros, special
-# characters (\t for tab, \r for carriage return, \n for newline)
-# and plain text.  A newline is automatically added after each write
-# to the performance data file.  Some examples of what you can do are
-# shown below.
-
-#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
-#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE MODES
-# This option determines whether or not the host and service
-# performance data files are opened in write ("w") or append ("a")
-# mode. If you want to use named pipes, you should use the special
-# pipe ("p") mode which avoid blocking at startup, otherwise you will
-# likely want the defult append ("a") mode.
-
-#host_perfdata_file_mode=a
-#service_perfdata_file_mode=a
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
-# These options determine how often (in seconds) the host and service
-# performance data files are processed using the commands defined
-# below.  A value of 0 indicates the files should not be periodically
-# processed.
-
-#host_perfdata_file_processing_interval=0
-#service_perfdata_file_processing_interval=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
-# These commands are used to periodically process the host and
-# service performance data files.  The interval at which the
-# processing occurs is determined by the options above.
-
-#host_perfdata_file_processing_command=process-host-perfdata-file
-#service_perfdata_file_processing_command=process-service-perfdata-file
-
-
-
-# OBSESS OVER SERVICE CHECKS OPTION
-# This determines whether or not Nagios will obsess over service
-# checks and run the ocsp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over services, 0 = do not obsess (default)
-
-obsess_over_services=0
-
-
-
-# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
-# This is the command that is run for every service check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_services option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ocsp_command=somecommand
-
-
-
-# OBSESS OVER HOST CHECKS OPTION
-# This determines whether or not Nagios will obsess over host
-# checks and run the ochp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over hosts, 0 = do not obsess (default)
-
-obsess_over_hosts=0
-
-
-
-# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
-# This is the command that is run for every host check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_hosts option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ochp_command=somecommand
-
-
-
-# TRANSLATE PASSIVE HOST CHECKS OPTION
-# This determines whether or not Nagios will translate
-# DOWN/UNREACHABLE passive host check results into their proper
-# state for this instance of Nagios.  This option is useful
-# if you have distributed or failover monitoring setup.  In
-# these cases your other Nagios servers probably have a different
-# "view" of the network, with regards to the parent/child relationship
-# of hosts.  If a distributed monitoring server thinks a host
-# is DOWN, it may actually be UNREACHABLE from the point of
-# this Nagios instance.  Enabling this option will tell Nagios
-# to translate any DOWN or UNREACHABLE host states it receives
-# passively into the correct state from the view of this server.
-# Values: 1 = perform translation, 0 = do not translate (default)
-
-translate_passive_host_checks=0
-
-
-
-# PASSIVE HOST CHECKS ARE SOFT OPTION
-# This determines whether or not Nagios will treat passive host
-# checks as being HARD or SOFT.  By default, a passive host check
-# result will put a host into a HARD state type.  This can be changed
-# by enabling this option.
-# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
-
-passive_host_checks_are_soft=0
-
-
-
-# ORPHANED HOST/SERVICE CHECK OPTIONS
-# These options determine whether or not Nagios will periodically 
-# check for orphaned host and service checks.  Since service checks are
-# not rescheduled until the results of their previous execution 
-# instance are processed, there exists a possibility that some
-# checks may never get rescheduled.  A similar situation exists for
-# host checks, although the exact scheduling details differ a bit
-# from service checks.  Orphaned checks seem to be a rare
-# problem and should not happen under normal circumstances.
-# If you have problems with service checks never getting
-# rescheduled, make sure you have orphaned service checks enabled.
-# Values: 1 = enable checks, 0 = disable checks
-
-check_for_orphaned_services=1
-check_for_orphaned_hosts=1
-
-
-
-# SERVICE FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of service results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_service_freshness=1
-
-
-
-# SERVICE FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of service check results.  If you have
-# disabled service freshness checking, this option has no effect.
-
-service_freshness_check_interval=60
-
-
-
-# HOST FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of host results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_host_freshness=0
-
-
-
-# HOST FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of host check results.  If you have
-# disabled host freshness checking, this option has no effect.
-
-host_freshness_check_interval=60
-
-
-
-
-# ADDITIONAL FRESHNESS THRESHOLD LATENCY
-# This setting determines the number of seconds that Nagios
-# will add to any host and service freshness thresholds that
-# it calculates (those not explicitly specified by the user).
-
-additional_freshness_latency=15
-
-
-
-
-# FLAP DETECTION OPTION
-# This option determines whether or not Nagios will try
-# and detect hosts and services that are "flapping".  
-# Flapping occurs when a host or service changes between
-# states too frequently.  When Nagios detects that a 
-# host or service is flapping, it will temporarily suppress
-# notifications for that host/service until it stops
-# flapping.  Flap detection is very experimental, so read
-# the HTML documentation before enabling this feature!
-# Values: 1 = enable flap detection
-#         0 = disable flap detection (default)
-
-enable_flap_detection=1
-
-
-
-# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
-# Read the HTML documentation on flap detection for
-# an explanation of what this option does.  This option
-# has no effect if flap detection is disabled.
-
-low_service_flap_threshold=5.0
-high_service_flap_threshold=20.0
-low_host_flap_threshold=5.0
-high_host_flap_threshold=20.0
-
-
-
-# DATE FORMAT OPTION
-# This option determines how short dates are displayed. Valid options
-# include:
-#	us		(MM-DD-YYYY HH:MM:SS)
-#	euro    	(DD-MM-YYYY HH:MM:SS)
-#	iso8601		(YYYY-MM-DD HH:MM:SS)
-#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
-#
-
-date_format=us
-
-
-
-
-# TIMEZONE OFFSET
-# This option is used to override the default timezone that this
-# instance of Nagios runs in.  If not specified, Nagios will use
-# the system configured timezone.
-#
-# NOTE: In order to display the correct timezone in the CGIs, you
-# will also need to alter the Apache directives for the CGI path 
-# to include your timezone.  Example:
-#
-#   <Directory "/usr/local/nagios/sbin/">
-#      SetEnv TZ "Australia/Brisbane"
-#      ...
-#   </Directory>
-
-#use_timezone=US/Mountain
-#use_timezone=Australia/Brisbane
-
-
-
-
-# P1.PL FILE LOCATION
-# This value determines where the p1.pl perl script (used by the
-# embedded Perl interpreter) is located.  If you didn't compile
-# Nagios with embedded Perl support, this option has no effect.
-
-p1_file = <%=scope.function_hdp_template_var("::hdp-nagios::server::nagios_p1_pl") %>
-
-
-
-# EMBEDDED PERL INTERPRETER OPTION
-# This option determines whether or not the embedded Perl interpreter
-# will be enabled during runtime.  This option has no effect if Nagios
-# has not been compiled with support for embedded Perl.
-# Values: 0 = disable interpreter, 1 = enable interpreter
-
-enable_embedded_perl=1
-
-
-
-# EMBEDDED PERL USAGE OPTION
-# This option determines whether or not Nagios will process Perl plugins
-# and scripts with the embedded Perl interpreter if the plugins/scripts
-# do not explicitly indicate whether or not it is okay to do so. Read
-# the HTML documentation on the embedded Perl interpreter for more 
-# information on how this option works.
-
-use_embedded_perl_implicitly=1
-
-
-
-# ILLEGAL OBJECT NAME CHARACTERS
-# This option allows you to specify illegal characters that cannot
-# be used in host names, service descriptions, or names of other
-# object types.
-
-illegal_object_name_chars=`~!$%^&*|'"<>?,()=
-
-
-
-# ILLEGAL MACRO OUTPUT CHARACTERS
-# This option allows you to specify illegal characters that are
-# stripped from macros before being used in notifications, event
-# handlers, etc.  This DOES NOT affect macros used in service or
-# host check commands.
-# The following macros are stripped of the characters you specify:
-#	$HOSTOUTPUT$
-#	$HOSTPERFDATA$
-#	$HOSTACKAUTHOR$
-#	$HOSTACKCOMMENT$
-#	$SERVICEOUTPUT$
-#	$SERVICEPERFDATA$
-#	$SERVICEACKAUTHOR$
-#	$SERVICEACKCOMMENT$
-
-illegal_macro_output_chars=`~$&|'"<>
-
-
-
-# REGULAR EXPRESSION MATCHING
-# This option controls whether or not regular expression matching
-# takes place in the object config files.  Regular expression
-# matching is used to match host, hostgroup, service, and service
-# group names/descriptions in some fields of various object types.
-# Values: 1 = enable regexp matching, 0 = disable regexp matching
-
-use_regexp_matching=0
-
-
-
-# "TRUE" REGULAR EXPRESSION MATCHING
-# This option controls whether or not "true" regular expression 
-# matching takes place in the object config files.  This option
-# only has an effect if regular expression matching is enabled
-# (see above).  If this option is DISABLED, regular expression
-# matching only occurs if a string contains wildcard characters
-# (* and ?).  If the option is ENABLED, regexp matching occurs
-# all the time (which can be annoying).
-# Values: 1 = enable true matching, 0 = disable true matching
-
-use_true_regexp_matching=0
-
-
-
-# ADMINISTRATOR EMAIL/PAGER ADDRESSES
-# The email and pager address of a global administrator (likely you).
-# Nagios never uses these values itself, but you can access them by
-# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
-# commands.
-
-admin_email=nagios@localhost
-admin_pager=pagenagios@localhost
-
-
-
-# DAEMON CORE DUMP OPTION
-# This option determines whether or not Nagios is allowed to create
-# a core dump when it runs as a daemon.  Note that it is generally
-# considered bad form to allow this, but it may be useful for
-# debugging purposes.  Enabling this option doesn't guarantee that
-# a core file will be produced, but that's just life...
-# Values: 1 - Allow core dumps
-#         0 - Do not allow core dumps (default)
-
-daemon_dumps_core=0
-
-
-
-# LARGE INSTALLATION TWEAKS OPTION
-# This option determines whether or not Nagios will take some shortcuts
-# which can save on memory and CPU usage in large Nagios installations.
-# Read the documentation for more information on the benefits/tradeoffs
-# of enabling this option.
-# Values: 1 - Enable tweaks
-#         0 - Disable tweaks (default)
-
-use_large_installation_tweaks=0
-
-
-
-# ENABLE ENVIRONMENT MACROS
-# This option determines whether or not Nagios will make all standard
-# macros available as environment variables when host/service checks
-# and system commands (event handlers, notifications, etc.) are
-# executed.  Enabling this option can cause performance issues in 
-# large installations, as it will consume a bit more memory and (more
-# importantly) consume more CPU.
-# Values: 1 - Enable environment variable macros (default)
-#         0 - Disable environment variable macros
-
-enable_environment_macros=1
-
-
-
-# CHILD PROCESS MEMORY OPTION
-# This option determines whether or not Nagios will free memory in
-# child processes (processes used to execute system commands and host/
-# service checks).  If you specify a value here, it will override
-# program defaults.
-# Value: 1 - Free memory in child processes
-#        0 - Do not free memory in child processes
-
-#free_child_process_memory=1
-
-
-
-# CHILD PROCESS FORKING BEHAVIOR
-# This option determines how Nagios will fork child processes
-# (used to execute system commands and host/service checks).  Normally
-# child processes are fork()ed twice, which provides a very high level
-# of isolation from problems.  Fork()ing once is probably enough and will
-# save a great deal on CPU usage (in large installs), so you might
-# want to consider using this.  If you specify a value here, it will
-# override program defaults.
-# Value: 1 - Child processes fork() twice
-#        0 - Child processes fork() just once
-
-#child_processes_fork_twice=1
-
-
-
-# DEBUG LEVEL
-# This option determines how much (if any) debugging information will
-# be written to the debug file.  OR values together to log multiple
-# types of information.
-# Values: 
-#          -1 = Everything
-#          0 = Nothing
-#	   1 = Functions
-#          2 = Configuration
-#          4 = Process information
-#	   8 = Scheduled events
-#          16 = Host/service checks
-#          32 = Notifications
-#          64 = Event broker
-#          128 = External commands
-#          256 = Commands
-#          512 = Scheduled downtime
-#          1024 = Comments
-#          2048 = Macros
-
-debug_level=0
-
-
-
-# DEBUG VERBOSITY
-# This option determines how verbose the debug log output will be.
-# Values: 0 = Brief output
-#         1 = More detailed
-#         2 = Very detailed
-
-debug_verbosity=1
-
-
-
-# DEBUG FILE
-# This option determines where Nagios should write debugging information.
-
-debug_file=/var/log/nagios/nagios.debug
-
-
-
-# MAX DEBUG FILE SIZE
-# This option determines the maximum size (in bytes) of the debug file.  If
-# the file grows larger than this size, it will be renamed with a .old
-# extension.  If a file already exists with a .old extension it will
-# automatically be deleted.  This helps ensure your disk space usage doesn't
-# get out of control when debugging Nagios.
-
-max_debug_file_size=1000000
-
-

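A note on the debug_level values in the config above: they are bit flags that are OR'd together, so a single number can enable several categories at once. A minimal Python sketch of that arithmetic (the constant names below simply mirror the comment block; they are not part of Nagios itself):

    # Debug categories as OR-able bit flags, mirroring the nagios.cfg comments.
    FUNCTIONS, CONFIG, PROCESS, EVENTS = 1, 2, 4, 8
    CHECKS, NOTIFICATIONS, BROKER, EXTCMDS = 16, 32, 64, 128

    debug_level = CHECKS | NOTIFICATIONS   # 16 | 32 = 48: checks plus notifications
    print(debug_level)                     # 48
    print(bool(debug_level & CHECKS))      # True: check debugging enabled
    print(bool(debug_level & CONFIG))      # False: configuration debugging off
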
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.conf.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.conf.erb b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.conf.erb
deleted file mode 100644
index d8936a0..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.conf.erb
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# SAMPLE CONFIG SNIPPETS FOR APACHE WEB SERVER
-# Last Modified: 11-26-2005
-#
-# This file contains examples of entries that need
-# to be incorporated into your Apache web server
-# configuration file.  Customize the paths, etc. as
-# needed to fit your system.
-#
-
-ScriptAlias /nagios/cgi-bin "/usr/lib/nagios/cgi"
-
-<Directory "/usr/lib/nagios/cgi">
-#  SSLRequireSSL
-   Options ExecCGI
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile /etc/nagios/htpasswd.users
-   Require valid-user
-</Directory>
-
-Alias /nagios "/usr/share/nagios"
-
-<Directory "/usr/share/nagios">
-#  SSLRequireSSL
-   Options None
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile /etc/nagios/htpasswd.users
-   Require valid-user
-</Directory>
-

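The snippet above gates both the CGI and HTML aliases behind HTTP basic auth against /etc/nagios/htpasswd.users. A quick client-side check of that setup, sketched in Python; the URL and credentials are placeholders for whatever the htpasswd file actually contains:

    # Hedged sketch: confirm basic-auth access to the Nagios UI.
    import urllib.request

    url = "http://localhost/nagios/"                      # placeholder host
    mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    mgr.add_password(None, url, "nagiosadmin", "secret")  # placeholder creds
    opener = urllib.request.build_opener(urllib.request.HTTPBasicAuthHandler(mgr))
    print(opener.open(url).status)  # 200 once AuthUserFile accepts the login
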
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.erb b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.erb
deleted file mode 100644
index 6497534..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.erb
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/bin/sh
-# $Id$
-# Nagios	Startup script for the Nagios monitoring daemon
-#
-# chkconfig:	- 85 15
-# description:	Nagios is a service monitoring system
-# processname: nagios
-# config: /etc/nagios/nagios.cfg
-# pidfile: /var/nagios/nagios.pid
-#
-### BEGIN INIT INFO
-# Provides:		nagios
-# Required-Start:	$local_fs $syslog $network
-# Required-Stop:	$local_fs $syslog $network
-# Short-Description:    start and stop Nagios monitoring server
-# Description:		Nagios is a service monitoring system
-### END INIT INFO
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Source function library.
-. /etc/rc.d/init.d/functions
-
-prefix="/usr"
-exec_prefix="/usr"
-exec="/usr/sbin/nagios"
-prog="nagios"
-config="/etc/nagios/nagios.cfg"
-pidfile="<%=scope.function_hdp_template_var("nagios_pid_file")%>"
-user="<%=scope.function_hdp_template_var("nagios_user")%>"
-
-[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
-
-lockfile=/var/lock/subsys/$prog
-
-start() {
-    [ -x $exec ] || exit 5
-    [ -f $config ] || exit 6
-    echo -n $"Starting $prog: "
-    daemon --user=$user $exec -d $config
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && touch $lockfile
-    return $retval
-}
-
-stop() {
-    echo -n $"Stopping $prog: "
-    killproc -d 10 $exec
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && rm -f $lockfile
-    return $retval
-}
-
-
-restart() {
-    stop
-    start
-}
-
-reload() {
-    echo -n $"Reloading $prog: "
-    killproc $exec -HUP
-    RETVAL=$?
-    echo
-}
-
-force_reload() {
-    restart
-}
-
-check_config() {
-        $nice runuser -s /bin/bash - $user -c "$corelimit >/dev/null 2>&1 ; $exec -v $config > /dev/null 2>&1"
-        RETVAL=$?
-        if [ $RETVAL -ne 0 ] ; then
-                echo -n $"Configuration validation failed"
-                failure
-                echo
-                exit 1
-
-        fi
-}
-
-
-case "$1" in
-    start)
-        status $prog && exit 0
-	check_config
-        $1
-        ;;
-    stop)
-        status $prog|| exit 0
-        $1
-        ;;
-    restart)
-	check_config
-        $1
-        ;;
-    reload)
-        status $prog || exit 7
-	check_config
-        $1
-        ;;
-    force-reload)
-	check_config
-        force_reload
-        ;;
-    status)
-        status $prog
-        ;;
-    condrestart|try-restart)
-        status $prog|| exit 0
-	check_config
-        restart
-        ;;
-    configtest)
-        echo -n  $"Checking config for $prog: "
-        check_config && success
-        echo
-	;;
-    *)
-        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
-        exit 2
-esac
-exit $?

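Worth noting in the init script above: check_config runs "nagios -v <config>" before start, restart, and reload, so a broken configuration is rejected without touching the running daemon. A rough Python equivalent of that guard, reusing the paths from the script:

    # Sketch of the validate-before-restart guard from the init script.
    import subprocess, sys

    def check_config(exec_path="/usr/sbin/nagios", config="/etc/nagios/nagios.cfg"):
        # "nagios -v" parses the configuration and exits non-zero on errors.
        rc = subprocess.run([exec_path, "-v", config],
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL).returncode
        return rc == 0

    if not check_config():
        sys.exit("Configuration validation failed")
    # ...only now stop/start the daemon...
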
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/resource.cfg.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/resource.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/resource.cfg.erb
deleted file mode 100644
index b6a9a7b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/resource.cfg.erb
+++ /dev/null
@@ -1,33 +0,0 @@
-###########################################################################
-#
-# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
-#
-# Last Modified: 09-10-2003
-#
-# You can define $USERx$ macros in this file, which can in turn be used
-# in command definitions in your host config file(s).  $USERx$ macros are
-# useful for storing sensitive information such as usernames, passwords,
-# etc.  They are also handy for specifying the path to plugins and
-# event handlers - if you decide to move the plugins or event handlers to
-# a different directory in the future, you can just update one or two
-# $USERx$ macros, instead of modifying a lot of command definitions.
-#
-# The CGIs will not attempt to read the contents of resource files, so
-# you can set restrictive permissions (600 or 660) on them.
-#
-# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
-#
-# Resource files may also be used to store configuration directives for
-# external data sources like MySQL...
-#
-###########################################################################
-
-# Sets $USER1$ to be the path to the plugins
-$USER1$=<%=scope.function_hdp_template_var("plugins_dir")%>
-
-# Sets $USER2$ to be the path to event handlers
-#$USER2$=<%=scope.function_hdp_template_var("eventhandlers_dir")%>
-
-# Store some usernames and passwords (hidden from the CGIs)
-#$USER3$=someuser
-#$USER4$=somepassword
\ No newline at end of file

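The point of resource.cfg is indirection: command definitions reference $USER1$ and friends instead of hard-coding plugin paths or credentials, so relocating the plugins means editing one macro. An illustrative Python sketch of that expansion (the command line and path are invented examples, not taken from this diff):

    # Sketch: expand Nagios-style $USERx$ macros in a command definition.
    macros = {"$USER1$": "/usr/lib64/nagios/plugins"}   # illustrative path

    def expand(command_line, macros):
        for name, value in macros.items():
            command_line = command_line.replace(name, value)
        return command_line

    print(expand("$USER1$/check_ping -H $HOSTADDRESS$", macros))
    # -> /usr/lib64/nagios/plugins/check_ping -H $HOSTADDRESS$
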
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-oozie/files/oozieSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/files/oozieSmoke.sh b/ambari-agent/src/main/puppet/modules/hdp-oozie/files/oozieSmoke.sh
deleted file mode 100644
index 2446544..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/files/oozieSmoke.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-function getValueFromField {
-  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
-  return $?
-}
-
-function checkOozieJobStatus {
-  local job_id=$1
-  local num_of_tries=$2
-  #default num_of_tries to 10 if not present
-  num_of_tries=${num_of_tries:-10}
-  local i=0
-  local rc=1
-  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
-  su - ${smoke_test_user} -c "$cmd"
-  while [ $i -lt $num_of_tries ] ; do
-    cmd_output=`su - ${smoke_test_user} -c "$cmd"`
-    (IFS='';echo $cmd_output)
-    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
-    echo "workflow_status=$act_status"
-    if [ "RUNNING" == "$act_status" ]; then
-      #increment the couner and get the status again after waiting for 15 secs
-      sleep 15
-      (( i++ ))
-      elif [ "SUCCEEDED" == "$act_status" ]; then
-        rc=0;
-        break;
-      else
-        rc=1
-        break;
-      fi
-    done
-    return $rc
-}
-
-export oozie_conf_dir=$1
-export hadoop_conf_dir=$2
-export smoke_test_user=$3
-export security_enabled=$4
-export smoke_user_keytab=$5
-export kinit_path_local=$6
-
-export OOZIE_EXIT_CODE=0
-export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/mapred-site.xml mapred.job.tracker`
-export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.default.name`
-export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
-export OOZIE_EXAMPLES_DIR=`rpm -ql oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
-cd $OOZIE_EXAMPLES_DIR
-
-tar -zxf oozie-examples.tar.gz
-sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else 
-  kinitcmd=""
-fi
-
-su - ${smoke_test_user} -c "hadoop dfs -rmr examples"
-su - ${smoke_test_user} -c "hadoop dfs -rmr input-data"
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
-
-cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
-job_info=`su - ${smoke_test_user} -c "$cmd" | grep "job:"`
-job_id="`echo $job_info | cut -d':' -f2`"
-checkOozieJobStatus "$job_id"
-OOZIE_EXIT_CODE="$?"
-exit $OOZIE_EXIT_CODE

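The heart of the smoke test above is checkOozieJobStatus: it re-runs "oozie job -info" until the workflow leaves RUNNING, sleeping 15 seconds between attempts and defaulting to 10 tries. The same loop sketched in Python (server URL and job id come from the caller; this assumes the oozie CLI lives at /usr/bin/oozie as in the script):

    # Sketch of the smoke test's job-status polling loop.
    import subprocess, time

    def check_oozie_job_status(oozie_server, job_id, num_of_tries=10):
        cmd = ["/usr/bin/oozie", "job", "-oozie", oozie_server, "-info", job_id]
        for _ in range(num_of_tries):
            out = subprocess.run(cmd, capture_output=True, text=True).stdout
            status = next((l.split(":", 1)[1].strip() for l in out.splitlines()
                           if l.startswith("Status")), "")
            print("workflow_status=%s" % status)
            if status != "RUNNING":
                return status == "SUCCEEDED"
            time.sleep(15)          # still running: wait, then poll again
        return False
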
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-oozie/files/oozieSmoke2.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/files/oozieSmoke2.sh b/ambari-agent/src/main/puppet/modules/hdp-oozie/files/oozieSmoke2.sh
deleted file mode 100644
index 2cb5a7a..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/files/oozieSmoke2.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-function getValueFromField {
-  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
-  return $?
-}
-
-function checkOozieJobStatus {
-  local job_id=$1
-  local num_of_tries=$2
-  #default num_of_tries to 10 if not present
-  num_of_tries=${num_of_tries:-10}
-  local i=0
-  local rc=1
-  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
-  su - ${smoke_test_user} -c "$cmd"
-  while [ $i -lt $num_of_tries ] ; do
-    cmd_output=`su - ${smoke_test_user} -c "$cmd"`
-    (IFS='';echo $cmd_output)
-    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
-    echo "workflow_status=$act_status"
-    if [ "RUNNING" == "$act_status" ]; then
-      #increment the couner and get the status again after waiting for 15 secs
-      sleep 15
-      (( i++ ))
-      elif [ "SUCCEEDED" == "$act_status" ]; then
-        rc=0;
-        break;
-      else
-        rc=1
-        break;
-      fi
-    done
-    return $rc
-}
-
-export oozie_conf_dir=$1
-export hadoop_conf_dir=$2
-export smoke_test_user=$3
-export security_enabled=$4
-export smoke_user_keytab=$5
-export kinit_path_local=$6
-
-export OOZIE_EXIT_CODE=0
-export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/yarn-site.xml yarn.resourcemanager.address`
-export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.defaultFS`
-export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
-export OOZIE_EXAMPLES_DIR=`rpm -ql oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
-cd $OOZIE_EXAMPLES_DIR
-
-tar -zxf oozie-examples.tar.gz
-sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else 
-  kinitcmd=""
-fi
-
-su - ${smoke_test_user} -c "hdfs dfs -rm -r examples"
-su - ${smoke_test_user} -c "hdfs dfs -rm -r input-data"
-su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
-su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
-
-cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
-echo $cmd
-job_info=`su - ${smoke_test_user} -c "$cmd" | grep "job:"`
-job_id="`echo $job_info | cut -d':' -f2`"
-checkOozieJobStatus "$job_id"
-OOZIE_EXIT_CODE="$?"
-exit $OOZIE_EXIT_CODE

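Both smoke scripts extract property values from Hadoop's *-site.xml files by piping xmllint through grep and cut, which silently breaks if the file layout shifts. The same lookup with a real XML parser, sketched in Python (file path and property name are only examples):

    # Sketch replacing getValueFromField's grep/cut pipeline with a parser.
    import xml.etree.ElementTree as ET

    def get_value_from_field(xml_path, name):
        # *-site.xml layout: <configuration><property><name/><value/></property>...
        for prop in ET.parse(xml_path).getroot().iter("property"):
            if prop.findtext("name") == name:
                return prop.findtext("value")
        return None

    # e.g. get_value_from_field("/etc/hadoop/conf/core-site.xml", "fs.defaultFS")
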
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-oozie/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/files/wrap_ooziedb.sh b/ambari-agent/src/main/puppet/modules/hdp-oozie/files/wrap_ooziedb.sh
deleted file mode 100644
index 97a513c..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/files/wrap_ooziedb.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
-EC=$?
-echo $OUT
-GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
-if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
-then
-  exit 0
-else
-  exit $EC
-fi  

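wrap_ooziedb.sh exists to make database creation idempotent: a non-zero exit is forgiven when the output contains "DB schema exists", and every other failure propagates. The same masking sketched in Python (paths come from the script; the "create" argument is illustrative, since the wrapper forwards whatever arguments it is given):

    # Sketch of wrap_ooziedb.sh's tolerant exit-code handling.
    import subprocess, sys

    result = subprocess.run(["/usr/lib/oozie/bin/ooziedb.sh", "create"],
                            capture_output=True, text=True, cwd="/var/tmp/oozie")
    out = result.stdout + result.stderr       # the script merges streams via 2>&1
    print(out)
    if result.returncode != 0 and "DB schema exists" in out:
        sys.exit(0)                           # already created earlier: not an error
    sys.exit(result.returncode)
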
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/client.pp b/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/client.pp
deleted file mode 100644
index f80c356..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/client.pp
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie::client(
-  $service_state = $hdp::params::cluster_client_state,
-  $oozie_server = undef
-) inherits hdp::params
-{ 
-  if ($service_state == 'no_op') {
-   } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-     if ($hdp::params::service_exists['hdp-oozie::server'] != true) {
-       #installs package, creates user, sets configuration
-       class { 'hdp-oozie' :
-         service_state => $service_state
-       }
-      if ($oozie_server != undef) {
-        Hdp-Oozie::Configfile<||>{oozie_server => $oozie_server}
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/download-ext-zip.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/download-ext-zip.pp b/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/download-ext-zip.pp
deleted file mode 100644
index c63ebd1..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/download-ext-zip.pp
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie::download-ext-zip()
-{
-  anchor { 'hdp-oozie::download-ext-zip::begin':}
-
-   hdp::package { 'extjs' :
-     require   => Anchor['hdp-oozie::download-ext-zip::begin']
-   }
-
-   anchor { 'hdp-oozie::download-ext-zip::end':}
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp
deleted file mode 100644
index f1256c1..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp
+++ /dev/null
@@ -1,150 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie(
-  $service_state = undef,
-  $server = false,
-  $setup = false
-)
-{
-  include hdp-oozie::params 
-
-# Configs generation  
-
-  $oozie_user = $hdp-oozie::params::oozie_user
-  $oozie_config_dir = $hdp-oozie::params::conf_dir
-
-  if has_key($configuration, 'oozie-site') {
-    configgenerator::configfile{'oozie-site':
-      modulespath => $oozie_config_dir, 
-      filename => 'oozie-site.xml',
-      module => 'hdp-oozie',
-      configuration => $configuration['oozie-site'],
-      owner => $oozie_user,
-      group => $hdp::params::user_group,
-      mode => '0664'
-    }
-  } else {
-    file { "${oozie_config_dir}/oozie-site.xml":
-      owner => $oozie_user,
-      group => $hdp::params::user_group,
-      mode => '0664'
-    }
-  }
-
-  if ($service_state == 'uninstalled') {
-    hdp::package { 'oozie-client' : 
-      ensure => 'uninstalled'
-    }
-    if ($server == true ) {
-      hdp::package { 'oozie-server' :
-        ensure => 'uninstalled'
-      }
-    }
-    hdp::directory { $oozie_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    anchor { 'hdp-oozie::begin': } -> Hdp::Package['oozie-client'] -> Hdp::Directory[$oozie_config_dir] ->  anchor { 'hdp-oozie::end': }
-
-    if ($server == true ) {
-       Hdp::Package['oozie-server'] -> Hdp::Package['oozie-client'] ->  Anchor['hdp-oozie::end']
-     }
-  } else {
-    hdp::package { 'oozie-client' : }
-    if ($server == true ) {
-      hdp::package { 'oozie-server':}
-      class { 'hdp-oozie::download-ext-zip': }
-    }
-
-     
-
-     hdp::directory { $oozie_config_dir: 
-       service_state => $service_state,
-       force => true,
-       owner => $oozie_user,
-       group => $hdp::params::user_group,
-       override_owner => true
-     }
-
-     hdp-oozie::configfile { 'oozie-env.sh': }
-
-     if ($service_state == 'installed_and_configured') {
-       hdp-oozie::configfile { 'oozie-log4j.properties': }
-
-       if ($hdp::params::oozie_jdbc_driver == "com.mysql.jdbc.Driver" or $hdp::params::oozie_jdbc_driver == "oracle.jdbc.driver.OracleDriver") {
-         hdp::exec { "download DBConnectorVerification.jar" :
-           command => "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 ${hdp::params::jdk_location}${hdp::params::check_db_connection_jar_name} -o ${hdp::params::check_db_connection_jar_name}'",
-           unless  => "[ -f ${check_db_connection_jar} ]"
-         }
-       }
-     }
-
-     hdp-oozie::ownership { 'ownership': }
-
-    anchor { 'hdp-oozie::begin': } -> Hdp::Package['oozie-client'] -> Hdp::Directory[$oozie_config_dir] -> Hdp-oozie::Configfile<||> -> Hdp-oozie::Ownership['ownership'] -> anchor { 'hdp-oozie::end': }
-
-     if ($server == true ) { 
-       Hdp::Package['oozie-server'] -> Hdp::Package['oozie-client'] -> Class['hdp-oozie::download-ext-zip'] ->  Anchor['hdp-oozie::end']
-     }
- }
-}
-
-### config files
-define hdp-oozie::configfile(
-  $mode = undef,
-  $oozie_server = undef
-) 
-{
-  hdp::configfile { "${hdp-oozie::params::conf_dir}/${name}":
-    component       => 'oozie',
-    owner           => $hdp-oozie::params::oozie_user,
-    mode            => $mode,
-    oozie_server    => $oozie_server
-  }
-}
-
-define hdp-oozie::ownership {
-  file { "${hdp-oozie::params::conf_dir}/adminusers.txt":
-    owner => $hdp-oozie::params::oozie_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-oozie::params::conf_dir}/hadoop-config.xml":
-    owner => $hdp-oozie::params::oozie_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-oozie::params::conf_dir}/oozie-default.xml":
-    owner => $hdp-oozie::params::oozie_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-oozie::params::conf_dir}/action-conf":
-    owner => $hdp-oozie::params::oozie_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-oozie::params::conf_dir}/action-conf/hive.xml":
-    owner => $hdp-oozie::params::oozie_user,
-    group => $hdp::params::user_group
-  }
-}

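In init.pp above, configgenerator::configfile renders the 'oozie-site' map from the cluster configuration into oozie-site.xml with the right owner and mode. A minimal sketch of just the rendering half (the sample property value is illustrative):

    # Sketch: render a configuration map into a Hadoop-style *-site.xml file.
    from xml.sax.saxutils import escape

    def write_site_xml(path, properties):
        with open(path, "w") as f:
            f.write("<configuration>\n")
            for name, value in sorted(properties.items()):
                f.write("  <property>\n")
                f.write("    <name>%s</name>\n" % escape(name))
                f.write("    <value>%s</value>\n" % escape(str(value)))
                f.write("  </property>\n")
            f.write("</configuration>\n")

    write_site_xml("oozie-site.xml",
                   {"oozie.base.url": "http://localhost:11000/oozie"})
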
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/oozie/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/oozie/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/oozie/service_check.pp
deleted file mode 100644
index 83911e3..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/oozie/service_check.pp
+++ /dev/null
@@ -1,73 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie::oozie::service_check()
-{
-  include hdp-oozie::params
-
-  $smoke_shell_files = ['oozieSmoke.sh']
-
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-    $smoke_test_file_name = 'oozieSmoke2.sh'
-  } else {
-    $smoke_test_file_name = 'oozieSmoke.sh'
-  }
-
-  anchor { 'hdp-oozie::oozie::service_check::begin':}
-
-  hdp-oozie::smoke_shell_file { $smoke_shell_files:
-    smoke_shell_file_name => $smoke_test_file_name
-  }
-
-  anchor{ 'hdp-oozie::oozie::service_check::end':}
-}
-
-define hdp-oozie::smoke_shell_file(
-  $smoke_shell_file_name
-)
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $conf_dir = $hdp::params::oozie_conf_dir
-  $hadoopconf_dir = $hdp::params::hadoop_conf_dir 
-  $security_enabled=$hdp::params::security_enabled
-  $kinit_path_local = $hdp::params::kinit_path_local
-  if ($security_enabled == true) {
-    $security = "true"
-  } else {
-    $security = "false"
-  }
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-  $realm=$hdp::params::kerberos_domain
-
-  file { "/tmp/${smoke_shell_file_name}":
-    ensure => present,
-    source => "puppet:///modules/hdp-oozie/${smoke_shell_file_name}",
-    mode => '0755'
-  }
-
-  exec { "/tmp/${smoke_shell_file_name}":
-    command   => "sh /tmp/${smoke_shell_file_name} ${conf_dir} ${hadoopconf_dir} ${smoke_test_user} ${security} ${smoke_user_keytab} ${kinit_path_local}",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File["/tmp/${smoke_shell_file_name}"],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-}

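The exec resource above gives the smoke script three attempts with a five-second pause between them (tries/try_sleep) and only fires after the File resource has staged the script. A bare-bones Python rendering of that retry semantics (the invocation is a simplified stand-in for the manifest's full command line):

    # Sketch of Puppet's tries/try_sleep retry semantics for an exec.
    import subprocess, time

    def run_with_retries(cmd, tries=3, try_sleep=5):
        for attempt in range(1, tries + 1):
            if subprocess.run(cmd).returncode == 0:
                return True
            if attempt < tries:
                time.sleep(try_sleep)   # pause before the next attempt
        return False

    run_with_retries(["sh", "/tmp/oozieSmoke2.sh"])  # illustrative invocation
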
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/params.pp
deleted file mode 100644
index da7c908..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/params.pp
+++ /dev/null
@@ -1,65 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie::params() inherits hdp::params
-{
-  $oozie_user = $hdp::params::oozie_user 
-
-  ###ext url
-  $download_url = $hdp::params::gpl_artifacts_download_url
-  $ext_zip_url = "${download_url}/ext-2.2.zip"
-  $ext_zip_name = hdp_default("ext_zip_name","ext-2.2.zip")
-
-  ###oozie jdbc
-  $oozie_metastore_user_name = hdp_default("oozie-site/oozie.service.JPAService.jdbc.username","oozie")
-  $oozie_metastore_user_passwd = hdp_default("oozie-site/oozie.service.JPAService.jdbc.password","")
-  $oozie_jdbc_connection_url = hdp_default("oozie-site/oozie.service.JPAService.jdbc.url", "")
-
-  ### oozie-env
-  $conf_dir = $hdp::params::oozie_conf_dir
-  $hadoop_prefix = hdp_default("hadoop_prefix","/usr")
-
-  ### oozie-env
-  $oozie_log_dir = hdp_default("oozie_log_dir","/var/log/oozie")
-
-  $oozie_pid_dir = hdp_default("oozie_pid_dir","/var/run/oozie/")
-  $oozie_pid_file = hdp_default("oozie_pid_file","$oozie_pid_dir/oozie.pid")
-
-  $oozie_data_dir = hdp_default("oozie_data_dir","/var/data/oozie")
-
-  $oozie_tmp_dir = hdp_default("oozie_tmp_dir","/var/tmp/oozie")
-
-  $oozie_lib_dir = hdp_default("oozie_lib_dir","/var/lib/oozie/")
-  
-  $oozie_webapps_dir = hdp_default("oozie_webapps_dir","/var/lib/oozie/oozie-server/webapps/")
-  
-  ### oozie-site
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
-  $oozie_service_keytab = hdp_default("oozie-site/oozie.service.HadoopAccessorService.keytab.file", "${keytab_path}/oozie.service.keytab")
-  $oozie_principal = hdp_default("oozie-site/oozie.service.HadoopAccessorService.kerberos.principal", "oozie")
-
-  if ($security_enabled == true) {
-    $oozie_sasl_enabled = "true"
-    $oozie_security_type = "kerberos"
-  } else {
-    $oozie_sasl_enabled = "false"
-    $oozie_security_type = "simple"
-  }
-}

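params.pp leans on hdp_default(key, default) throughout: take the value from the cluster configuration when present, otherwise fall back to the stated default. The same lookup pattern in Python (the keys and the override value are samples drawn from the manifest):

    # Sketch of the hdp_default(key, default) fallback lookup.
    configuration = {"oozie_log_dir": "/grid/0/log/oozie"}  # sample override

    def hdp_default(key, default=None):
        return configuration.get(key, default)

    print(hdp_default("oozie_log_dir", "/var/log/oozie"))   # override wins
    print(hdp_default("oozie_pid_dir", "/var/run/oozie/"))  # default applies
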
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/server.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/server.pp b/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/server.pp
deleted file mode 100644
index 5930296..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/server.pp
+++ /dev/null
@@ -1,72 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $setup = false,
-  $opts = {}
-) inherits  hdp-oozie::params
-{   
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-    $hdp::params::service_exists['hdp-oozie::server'] = true
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-       $masterHost = $kerberos_adminclient_host[0]
-       hdp::download_keytab { 'oozie_service_keytab' :
-         masterhost => $masterHost,
-         keytabdst => "${$keytab_path}/oozie.service.keytab",
-         keytabfile => 'oozie.service.keytab',
-         owner => $hdp::params::oozie_user
-       }
-
-       if ( ($hdp::params::service_exists['hdp-hadoop::namenode'] != true) and
-            ($hdp::params::service_exists['hdp-hadoop::snamenode'] != true) ) {
-         hdp::download_keytab { 'oozie_spnego_keytab' :
-           masterhost => $masterHost,
-           keytabdst => "${$keytab_path}/spnego.service.keytab",
-           keytabfile => 'spnego.service.keytab',
-           owner => $hdp::params::oozie_user,
-           group => $hdp::params::user_group,
-           mode => '0440'
-         }
-      }
-    }
-
-    #installs package, creates user, sets configuration
-    class{ 'hdp-oozie' : 
-      service_state => $service_state,
-      server        => true
-    } 
-  
-    Hdp-Oozie::Configfile<||>{oozie_server => $hdp::params::oozie_server}
-
-    class { 'hdp-oozie::service' :
-      ensure       => $service_state,
-      setup         => $setup
-    }
-  
-    #top level does not need anchors
-    Class['hdp-oozie'] -> Class['hdp-oozie::service']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

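Like the other manifests in this module, server.pp dispatches on service_state: no_op is a silent pass, the known states are handled, and anything else fails loudly. A compact sketch of that dispatch shape (the handler body is a stub):

    # Sketch of the service_state dispatch used across these manifests.
    KNOWN = {"running", "stopped", "installed_and_configured", "uninstalled"}

    def handle(service_state):
        if service_state == "no_op":
            return                              # explicitly do nothing
        if service_state not in KNOWN:
            raise ValueError("TODO not implemented yet: service_state = %s"
                             % service_state)
        # install packages, download keytabs when security is enabled, etc.

    handle("no_op")
    handle("running")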

[13/15] AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/task-log4j.properties b/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/task-log4j.properties
deleted file mode 100644
index c8939fc..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/task-log4j.properties
+++ /dev/null
@@ -1,132 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb
deleted file mode 100644
index 828b593..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb
+++ /dev/null
@@ -1,65 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#to handle differences in how args are passed in
-module Puppet::Parser::Functions
-  newfunction(:hdp_hadoop_get_mode, :type => :rvalue) do |args|
-  
-    dir = args[0]
-
-    oozie_dir = lookupvar("::hdp::params::oozie_hdfs_user_dir")
-    oozie_dir_mode = lookupvar("::hdp::params::oozie_hdfs_user_mode") 
-    
-    hcat_dir = lookupvar("::hdp::params::hcat_hdfs_user_dir")
-    hcat_dir_mode = lookupvar("::hdp::params::hcat_hdfs_user_mode") 
-    
-    webhcat_dir = lookupvar("::hdp::params::webhcat_hdfs_user_dir")
-    webhcat_dir_mode = lookupvar("::hdp::params::webhcat_hdfs_user_mode") 
-    
-    hive_dir = lookupvar("::hdp::params::hive_hdfs_user_dir")
-    hive_dir_mode = lookupvar("::hdp::params::hive_hdfs_user_mode") 
-    
-    smoke_dir = lookupvar("::hdp::params::smoke_hdfs_user_dir")
-    smoke_dir_mode = lookupvar("::hdp::params::smoke_hdfs_user_mode") 
-    
-    modes = []
-    modes.push({:dir => oozie_dir, :mode => oozie_dir_mode})
-    modes.push({:dir => hcat_dir, :mode => hcat_dir_mode})
-    modes.push({:dir => webhcat_dir, :mode => webhcat_dir_mode})
-    modes.push({:dir => hive_dir, :mode => hive_dir_mode})
-    modes.push({:dir => smoke_dir, :mode => smoke_dir_mode})
-
-    modes_grouped = {}
-    modes.each do |item|
-      if modes_grouped[item[:dir]].nil?
-        modes_grouped[item[:dir]]=[]
-      end
-      modes_grouped[item[:dir]]=modes_grouped[item[:dir]] + [(item[:mode])]
-    end
-
-    modes_max = {}
-    
-    modes_grouped.each_key do |key|
-      modes_max[key] = modes_grouped[key].max
-    end
-
-    modes_max[dir]
-  end
-end

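For reference, the core of the deleted hdp_hadoop_get_mode function (group the
candidate modes per directory, then return the highest mode recorded for the
requested directory) can be sketched as standalone Ruby. The directories and
modes below are illustrative, not values taken from the Ambari code:

    # Group each directory's candidate modes, then pick the maximum per directory.
    def max_mode_for(dir, entries)
      grouped = Hash.new { |h, k| h[k] = [] }
      entries.each { |e| grouped[e[:dir]] << e[:mode] }
      grouped[dir].max
    end

    entries = [
      { :dir => '/user/oozie', :mode => '775' },  # hypothetical dirs/modes
      { :dir => '/user/hive',  :mode => '755' },
      { :dir => '/user/hive',  :mode => '775' }   # two services share one dir
    ]
    puts max_mode_for('/user/hive', entries)      # => 775

Like the original, this compares mode strings lexicographically, which is
adequate for same-length octal strings such as '755' and '775'.
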
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_namenode_id.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_namenode_id.rb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_namenode_id.rb
deleted file mode 100644
index 719d1e6..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_namenode_id.rb
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#to get the namenode service id in an HA setup
-
-module Puppet::Parser::Functions
-  newfunction(:hdp_hadoop_get_namenode_id, :type => :rvalue) do |args|
-    namenode_id = ""
-    if args.length > 1
-      # Get hdfs-site to lookup hostname properties
-      lookup_property = args[0]
-      siteName = args[1]
-      siteConfig = lookupvar("#{siteName}")
-      nn_ids_str = lookupvar("::hdp::params::dfs_ha_namenode_ids")
-      hostname = lookupvar("::hdp::params::hostname")
-      nn_ids = nn_ids_str.to_s.split(',')
-
-      if nn_ids.length > 1
-        nn_ids.each do |id|
-          lookup_key = lookup_property + "." + id.to_s.strip
-          property_val = siteConfig.fetch(lookup_key, "")
-          if property_val != "" and property_val.include? hostname
-            namenode_id = id
-          end
-        end
-      end
-    end
-    namenode_id.strip
-  end
-end
\ No newline at end of file

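The deleted hdp_hadoop_get_namenode_id function resolves which HA NameNode id
belongs to the local host by probing "<property>.<id>" keys in the site
config. A standalone Ruby sketch of that lookup, with hypothetical property
names and hostnames:

    # Return the NameNode id whose "<property>.<id>" value mentions this host.
    def namenode_id(lookup_property, site_config, nn_ids_str, hostname)
      found = ""
      nn_ids = nn_ids_str.to_s.split(',')
      if nn_ids.length > 1
        nn_ids.each do |id|
          val = site_config.fetch("#{lookup_property}.#{id.strip}", "")
          found = id if val != "" && val.include?(hostname)
        end
      end
      found.strip
    end

    site = {
      'dfs.namenode.rpc-address.mycluster.nn1' => 'host1.example.com:8020',
      'dfs.namenode.rpc-address.mycluster.nn2' => 'host2.example.com:8020'
    }
    puts namenode_id('dfs.namenode.rpc-address.mycluster', site,
                     'nn1,nn2', 'host2.example.com')   # => nn2
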
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb
deleted file mode 100644
index 9ae36ef..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#to handle differences in how args are passed in
-module Puppet::Parser::Functions
-  newfunction(:hdp_hadoop_get_owner, :type => :rvalue) do |args|
-  
-    dir = args[0]
-    
-    oozie_dir = lookupvar("::hdp::params::oozie_hdfs_user_dir")
-    oozie_user = lookupvar("::hdp::params::oozie_user") 
-
-    hcat_dir = lookupvar("::hdp::params::hcat_hdfs_user_dir")
-    hcat_user = lookupvar("::hdp::params::hcat_user") 
-
-    webhcat_dir = lookupvar("::hdp::params::webhcat_hdfs_user_dir")
-    webhcat_user = lookupvar("::hdp::params::webhcat_user") 
-
-    hive_dir = lookupvar("::hdp::params::hive_hdfs_user_dir")
-    hive_user = lookupvar("::hdp::params::hive_user") 
-
-    smoke_dir = lookupvar("::hdp::params::smoke_hdfs_user_dir")
-    smoke_user = lookupvar("::hdp::params::smokeuser") 
-
-    dirs_to_owners = {}
-    dirs_to_owners[oozie_dir] = oozie_user
-    dirs_to_owners[hcat_dir] = hcat_user
-    dirs_to_owners[webhcat_dir] = webhcat_user
-    dirs_to_owners[hive_dir] = hive_user
-    dirs_to_owners[smoke_dir] = smoke_user
-
-    dirs_to_owners[dir]
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp
deleted file mode 100644
index 97629a8..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::client(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::client'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-
-  if ($hdp::params::use_32_bits_on_slaves == true) {
-    Hdp-hadoop::Package<||>{include_32_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 32}
-  } else {
-    Hdp-hadoop::Package<||>{include_64_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 64}
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'hadoop_client_ambari_qa_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/${smokeuser}.headless.keytab",
-        keytabfile => "${smokeuser}.headless.keytab",
-        owner => $smokeuser,
-        hostnameInPrincipals => 'no'
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp
deleted file mode 100644
index 04a0d8e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp
+++ /dev/null
@@ -1,100 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::datanode(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params 
-{
-
-  $hdp::params::service_exists['hdp-hadoop::datanode'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-
-  if ($hdp::params::use_32_bits_on_slaves == true) {
-    Hdp-hadoop::Package<||>{include_32_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 32}
-  } else {
-    Hdp-hadoop::Package<||>{include_64_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 64}
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-    $dfs_data_dir = $hdp-hadoop::params::dfs_data_dir
-  
-    if (($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)){
-      $a_namenode_on_node = true
-    } else {
-      $a_namenode_on_node = false
-    }
-
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'datanode_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/dn.service.keytab",
-        keytabfile => 'dn.service.keytab',
-        owner => $hdp-hadoop::params::hdfs_user
-      }
-    }
-
-  
-    hdp-hadoop::datanode::create_data_dirs { $dfs_data_dir: 
-      service_state => $service_state
-    }
-
-    if ($a_namenode_on_node == true){
-      $create_pid_dir = false
-      $create_log_dir = false
-    } else {
-      $create_pid_dir = true
-      $create_log_dir = true
-    }
-    
-    hdp-hadoop::service{ 'datanode':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => $create_pid_dir,
-      create_log_dir => $create_log_dir
-    }
-    
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Datanode::Create_data_dirs<||> -> Hdp-hadoop::Service['datanode'] -> Anchor['hdp-hadoop::end'] 
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::datanode::create_data_dirs($service_state)
-{
-  $dirs = hdp_array_from_comma_list($name)
-  hdp::directory_recursive_create_ignore_failure { $dirs :
-    owner => $hdp-hadoop::params::hdfs_user,
-    mode => '0750',
-    service_state => $service_state,
-    force => true
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs.pp
deleted file mode 100644
index 9d044ff..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs.pp
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::glusterfs_client(
-  $service_state = $hdp::params::cluster_client_state,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::glusterfs_client'] = true
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-  	#adds package, users and directories, and common hadoop configs
-  	include hdp-hadoop::initialize
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs_service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs_service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs_service_check.pp
deleted file mode 100644
index 87c03c8..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs_service_check.pp
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::glusterfs_service_check(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::glusterfs'] = true
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp
deleted file mode 100644
index d8099df..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp
+++ /dev/null
@@ -1,84 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp-hadoop::hdfs::copyfromlocal(
-  $service_state,
-  $owner = unset,
-  $group = unset,
-  $recursive_chown = false,
-  $mode = undef,
-  $recursive_chmod = false,
-  $dest_dir = undef,
-  $kinit_if_needed = undef
-) 
-{
- 
-  if ($service_state == 'running') {
-    $copy_cmd = "fs -copyFromLocal ${name} ${dest_dir}"
-    if ($kinit_if_needed == undef) {
-      $unless_cmd = "hadoop fs -ls ${dest_dir} >/dev/null 2>&1"
-    } else {
-      $unless_cmd = "${kinit_if_needed} hadoop fs -ls ${dest_dir} >/dev/null 2>&1"
-    }
-    ## exec-hadoop does a kinit based on the user, but the unless command does not
-    hdp-hadoop::exec-hadoop { $copy_cmd:
-      command => $copy_cmd,
-      unless => $unless_cmd,
-      user => $owner
-    }
-    if ($owner == unset) {
-      $chown = ""
-    } else {
-      if ($group == unset) {
-        $chown = $owner
-      } else {
-        $chown = "${owner}:${group}"
-     } 
-    }  
- 
-    if ($chown != "") {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chown == true) {
-        $chown_cmd = "fs -chown -R ${chown} ${dest_dir}"
-      } else {
-        $chown_cmd = "fs -chown ${chown} ${dest_dir}"
-      }
-      hdp-hadoop::exec-hadoop {$chown_cmd :
-        command => $chown_cmd,
-        user => $owner
-      }
-      Hdp-hadoop::Exec-hadoop[$copy_cmd] -> Hdp-hadoop::Exec-hadoop[$chown_cmd]
-    }
-  
-    if ($mode != undef) {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chmod == true) {
-        $chmod_cmd = "fs -chmod -R ${mode} ${dest_dir}"
-      } else {
-        $chmod_cmd = "fs -chmod ${mode} ${dest_dir}"
-      }
-      hdp-hadoop::exec-hadoop {$chmod_cmd :
-        command => $chmod_cmd,
-        user => $owner
-      }
-      Hdp-hadoop::Exec-hadoop[$copy_cmd] -> Hdp-hadoop::Exec-hadoop[$chmod_cmd]
-    }
-  }       
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp
deleted file mode 100644
index 68ef792..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::hdfs::decommission(
-) inherits hdp-hadoop::params
-{
-  if hdp_is_empty($configuration[hdfs-site]['dfs.hosts.exclude']) {
-    hdp_fail("There is no path to exclude file in configuration!")
-  }
-
-  $kinit_path = $hdp::params::kinit_path_local
-  $keytab_path = $hdp::params::hdfs_user_keytab
-  $hdfs_user = $hdp::params::hdfs_user
-  $kinit_cmd = "su - ${hdfs_user} -c '${kinit_path} -kt ${keytab_path} ${hdfs_user}'"
-
-  if ($hdp::params::security_enabled == true) {
-    exec { 'kinit_before_decommission' :
-      command => $kinit_cmd,
-      path => ['/bin'],
-      before => Hdp-Hadoop::Hdfs::Generate_Exclude_File['exclude_file']
-    }
-  }
-
-  hdp-hadoop::hdfs::generate_exclude_file{'exclude_file':}
-
-  hdp::exec{"hadoop dfsadmin -refreshNodes":
-      command => "hadoop dfsadmin -refreshNodes",
-      user => $hdp::params::hdfs_user,
-      require => Hdp-Hadoop::Hdfs::Generate_Exclude_File['exclude_file']
-    }
-  
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp
deleted file mode 100644
index f0852ae..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp
+++ /dev/null
@@ -1,121 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#TODO: unset should be changed to undef, just to be consistent
-define hdp-hadoop::hdfs::directory(
-  $service_state = 'running',
-  $owner = unset,
-  $group = unset,
-  $recursive_chown = false,
-  $mode = undef,
-  $recursive_chmod = false
-) 
-{
-  $dir_exists = "hadoop fs -ls ${name} >/dev/null 2>&1"
-  $namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
-
-  # Short-circuit the expensive dfs client checks if the directory was already created
-  $stub_dir = $hdp-hadoop::params::namenode_dirs_created_stub_dir
-  $stub_filename = $hdp-hadoop::params::namenode_dirs_stub_filename
-  $dir_absent_in_stub = "grep -q '^${name}$' ${stub_dir}/${stub_filename} > /dev/null 2>&1; test $? -ne 0"
-  $record_dir_in_stub = "echo '${name}' >> ${stub_dir}/${stub_filename}"
-  $tries = 30
-  $try_sleep = 10
-
-  if ($hdp::params::dfs_ha_enabled == true) {
-     $namenode_id = $hdp-hadoop::params::namenode_id
-     if (hdp_is_empty($namenode_id) == false) {
-       $dfs_check_nn_status_cmd = "hdfs haadmin -getServiceState $namenode_id | grep active > /dev/null"
-     }
-   } else {
-     $dfs_check_nn_status_cmd = "true"
-   }
-
-  if ($service_state == 'running') {
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      $mkdir_cmd = "fs -mkdir -p ${name}"
-    } else {
-      $mkdir_cmd = "fs -mkdir ${name}"
-    }
-
-    hdp-hadoop::exec-hadoop { $mkdir_cmd:
-      command   => $mkdir_cmd,
-      unless    => "$dir_absent_in_stub && $dfs_check_nn_status_cmd && $dir_exists && ! $namenode_safe_mode_off",
-      onlyif    => "$dir_absent_in_stub && $dfs_check_nn_status_cmd && ! $dir_exists",
-      try_sleep => $try_sleep,
-      tries     => $tries
-    }
-
-    hdp::exec { $record_dir_in_stub:
-      command => $record_dir_in_stub,
-      user => $hdp-hadoop::params::hdfs_user,
-      onlyif => $dir_absent_in_stub
-    }
-
-    Hdp-hadoop::Exec-hadoop[$mkdir_cmd] ->
-    Hdp::Exec[$record_dir_in_stub]
-
-    if ($owner == unset) {
-      $chown = ""
-    } else {
-      if ($group == unset) {
-        $chown = $owner
-      } else {
-        $chown = "${owner}:${group}"
-     } 
-    }  
- 
-    if ($chown != "") {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chown == true) {
-        $chown_cmd = "fs -chown -R ${chown} ${name}"
-      } else {
-        $chown_cmd = "fs -chown ${chown} ${name}"
-      }
-      hdp-hadoop::exec-hadoop {$chown_cmd :
-        command   => $chown_cmd,
-        onlyif    => "$dir_absent_in_stub && $dfs_check_nn_status_cmd && $namenode_safe_mode_off && $dir_exists",
-        try_sleep => $try_sleep,
-        tries     => $tries
-      }
-      Hdp-hadoop::Exec-hadoop[$mkdir_cmd] ->
-      Hdp-hadoop::Exec-hadoop[$chown_cmd] ->
-      Hdp::Exec[$record_dir_in_stub]
-    }
-  
-    if ($mode != undef) {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chmod == true) {
-        $chmod_cmd = "fs -chmod -R ${mode} ${name}"
-      } else {
-        $chmod_cmd = "fs -chmod ${mode} ${name}"
-      }
-      hdp-hadoop::exec-hadoop { $chmod_cmd :
-        command   => $chmod_cmd,
-        onlyif    => "$dir_absent_in_stub && $dfs_check_nn_status_cmd && $namenode_safe_mode_off && $dir_exists",
-        try_sleep => $try_sleep,
-        tries     => $tries
-      }
-      Hdp-hadoop::Exec-hadoop[$mkdir_cmd] ->
-      Hdp-hadoop::Exec-hadoop[$chmod_cmd] ->
-      Hdp::Exec[$record_dir_in_stub]
-    }
-  }       
-}

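One detail worth noting in the deleted directory.pp: it short-circuits the
expensive 'hadoop fs' client checks by recording every directory it has
already created in a local stub file, so repeated runs only pay for a grep. A
rough Ruby sketch of that idea, with a hypothetical stub path (the real
resource ordering lives in the Puppet graph above):

    STUB = '/var/lib/hdfs/namenode_dirs_created'    # hypothetical stub path

    def ensure_hdfs_dir(dir)
      created = File.exist?(STUB) ? File.readlines(STUB).map(&:chomp) : []
      return if created.include?(dir)               # cheap local check first
      system('hadoop', 'fs', '-mkdir', '-p', dir)   # expensive DFS call only on a miss
      File.open(STUB, 'a') { |f| f.puts(dir) }      # record it so later runs skip the check
    end
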
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp
deleted file mode 100644
index 5053e73..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-define hdp-hadoop::hdfs::generate_exclude_file()
-{
-  $exlude_file_path = $configuration['hdfs-site']['dfs.hosts.exclude']
-  ## Generate the exclude file if $configuration['hdfs-exclude-file']['datanodes']
-  ## has a value, or if the 'datanodes' key is present even with an empty value
-  if (hdp_is_empty($configuration) == false and
-    hdp_is_empty($configuration['hdfs-exclude-file']) == false) and
-    (hdp_is_empty($configuration['hdfs-exclude-file']['datanodes']) == false)
-    or has_key($configuration['hdfs-exclude-file'], 'datanodes') {
-    ##Create file with list of excluding hosts
-    $exlude_hosts_list = hdp_array_from_comma_list($configuration['hdfs-exclude-file']['datanodes'])
-    file { $exlude_file_path :
-      ensure => file,
-      content => template('hdp-hadoop/exclude_hosts_list.erb')
-    }
-  }
-}
-
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp
deleted file mode 100644
index 37d0eea..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp
+++ /dev/null
@@ -1,170 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::hdfs::service_check()
-{
-  $unique = hdp_unique_id_and_date()
-  $dir = '/tmp'
-  $tmp_file = "${dir}/${unique}"
-
-  $safemode_command = "dfsadmin -safemode get | grep OFF"
-
-  $create_dir_cmd = "fs -mkdir ${dir} ; hadoop fs -chmod -R 777 ${dir}"
-  $test_dir_exists = "hadoop fs -test -e ${dir}" #TODO: may fix up the fact that test needs explicit hadoop while the command does not
-  $cleanup_cmd = "fs -rm ${tmp_file}"
-  #cleanup put below to handle retries; if retrying there will be a stale file that needs cleanup; exit code is a function of the second command
-  $create_file_cmd = "${cleanup_cmd}; hadoop fs -put /etc/passwd ${tmp_file}" #TODO: inconsistent that the second command needs explicit hadoop
-  $test_cmd = "fs -test -e ${tmp_file}"
-
-  anchor { 'hdp-hadoop::hdfs::service_check::begin':}
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::check_safemode':
-    command   => $safemode_command,
-    tries     => 20,
-    try_sleep => 15,
-    logoutput => true,
-    user      => $hdp::params::smokeuser,
-    require   => Anchor['hdp-hadoop::hdfs::service_check::begin']
-  }
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::create_dir':
-    command   => $create_dir_cmd,
-    unless    => $test_dir_exists,
-    tries     => 3,
-    try_sleep => 5,
-    user      => $hdp::params::smokeuser,
-    require   => Hdp-hadoop::Exec-hadoop['hdfs::service_check::check_safemode']
-  }
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::create_file':
-    command   => $create_file_cmd,
-    tries     => 3,
-    try_sleep => 5,
-    user      => $hdp::params::smokeuser,
-    require   => Hdp-hadoop::Exec-hadoop['hdfs::service_check::create_dir'],
-    notify    => Hdp-hadoop::Exec-hadoop['hdfs::service_check::test']
-  }
-
-
-   #TODO: put in after testing
- #  hdp-hadoop::exec-hadoop { 'hdfs::service_check::cleanup':
- #   command     => $cleanup_cmd,
- #   refreshonly => true,
- #   require     => Hdp-hadoop::Exec-hadoop['hdfs::service_check::test'],
- #   before      => Anchor['hdp-hadoop::hdfs::service_check::end']
-  #}
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    user      => $hdp::params::smokeuser,
-    require     => Hdp-hadoop::Exec-hadoop['hdfs::service_check::create_file'],
-    before      => Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:begin']
-  }
-
-  anchor { 'hdp-hadoop::hdfs::service_check::journalnode_check:begin':}
-
-  if hdp_is_empty($hdp::params::journalnode_hosts) {
-    ##No journalnode hosts
-    Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:begin'] ->
-      Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:end']
-
-  } else {
-    ## Cluster has journalnode hosts, run test of journalnodes
-    $journalnode_hosts_comma_sep = hdp_comma_list_from_array($hdp::params::journalnode_hosts)
-    class { 'hdp-hadoop::journalnode::service_check':
-      journalnode_hosts => $journalnode_hosts_comma_sep,
-      require          => Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:begin'],
-      before           => Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:end']
-    }
-  }
-
-  anchor { 'hdp-hadoop::hdfs::service_check::journalnode_check:end':} ->
-    anchor { 'hdp-hadoop::hdfs::service_check::zkfc_check:begin':}
-
-  if hdp_is_empty($hdp::params::zkfc_hosts) {
-    ## No zkfc hosts
-    Anchor['hdp-hadoop::hdfs::service_check::zkfc_check:begin'] ->
-      Anchor['hdp-hadoop::hdfs::service_check::zkfc_check:end']
-  } else {
-    ## Cluster has zkfc hosts, so run a test of the local zkfc daemon if the
-    ## current host is the namenode. A namenode without ZKFC installed is
-    ## also considered a misconfiguration.
-    if ($hdp::params::is_namenode_master) {
-      class { 'hdp-hadoop::zkfc::service_check':
-        require          => Anchor['hdp-hadoop::hdfs::service_check::zkfc_check:begin'],
-        before           => Anchor['hdp-hadoop::hdfs::service_check::zkfc_check:end']
-      }
-    }
-  }
-
-  anchor { 'hdp-hadoop::hdfs::service_check::zkfc_check:end':} ->
-    anchor{ 'hdp-hadoop::hdfs::service_check::end':}
-
-}
-
-class hdp-hadoop::journalnode::service_check($journalnode_hosts)
-{
-  $journalnode_port = $hdp::params::journalnode_port
-  $smoke_test_user = $hdp::params::smokeuser
-  
-  $checkWebUIFileName = "checkWebUI.py"
-  $checkWebUIFilePath = "/tmp/$checkWebUIFileName"
-
-  $checkWebUICmd = "su - ${smoke_test_user} -c 'python $checkWebUIFilePath -m $journalnode_hosts -p $journalnode_port'"
-
-  file { $checkWebUIFilePath:
-    ensure => present,
-    source => "puppet:///modules/hdp-hadoop/$checkWebUIFileName",
-    mode => '0755'
-  }
-
-  exec { $checkWebUIFilePath:
-    command   => $checkWebUICmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-  anchor{"hdp-hadoop::smoketest::begin":} -> File[$checkWebUIFilePath] -> Exec[$checkWebUIFilePath] -> anchor{"hdp-hadoop::smoketest::end":}
-}
-
-class hdp-hadoop::zkfc::service_check() inherits hdp-hadoop::params
-{
-  $hdfs_user = $hdp::params::hdfs_user
-  $pid_dir = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${hdfs_user}"
-  $pid_file = "${pid_dir}/hadoop-${hdfs_user}-zkfc.pid"
-
-  # Check whether the pid file exists and, if so, run 'ps <pid>',
-  # which exits non-zero when the process is not running
-  $check_zkfc_process_cmd = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-
-  exec { $check_zkfc_process_cmd:
-    command   => $check_zkfc_process_cmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-
-  anchor{"hdp-hadoop::zkfc::service_check::begin":} -> Exec[$check_zkfc_process_cmd] ->
-    anchor{"hdp-hadoop::zkfc::service_check::end":}
-
-}

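The zkfc check above only verifies that the pid file exists and that the
recorded pid is alive (via 'ps'). A Ruby sketch of an equivalent liveness
test, using signal 0 instead of ps and a hypothetical pid-file path:

    PID_FILE = '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid'   # hypothetical path

    def zkfc_running?
      return false unless File.exist?(PID_FILE)
      Process.kill(0, File.read(PID_FILE).to_i)   # signal 0 probes without delivering anything
      true
    rescue Errno::ESRCH                           # no such process
      false
    rescue Errno::EPERM                           # alive, but owned by another user
      true
    end
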
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
deleted file mode 100644
index 8389bd2..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
+++ /dev/null
@@ -1,547 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#singleton for use with the <||> form so that namenode, datanode, etc. can pass state to hdp-hadoop and still use include
-define hdp-hadoop::common(
-  $service_state
-)
-{
-  class { 'hdp-hadoop':
-    service_state => $service_state
-  }
-  anchor{'hdp-hadoop::common::begin':} -> Class['hdp-hadoop'] -> anchor{'hdp-hadoop::common::end':} 
-}
-
-class hdp-hadoop::initialize()
-{
-  if ($hdp::params::component_exists['hdp-hadoop'] == true) {
-  } else {
-    $hdp::params::component_exists['hdp-hadoop'] = true
-  }
-  hdp-hadoop::common { 'common':}
-  anchor{'hdp-hadoop::initialize::begin':} -> Hdp-hadoop::Common['common'] -> anchor{'hdp-hadoop::initialize::end':}
-
-  # Configs generation
-  debug('##Configs generation for hdp-hadoop')
-
-  if has_key($configuration, 'mapred-queue-acls') {
-    configgenerator::configfile{'mapred-queue-acls': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'mapred-queue-acls.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['mapred-queue-acls'],
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/mapred-queue-acls.xml":
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  }
-  
-  if has_key($configuration, 'hadoop-policy') {
-    configgenerator::configfile{'hadoop-policy': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'hadoop-policy.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['hadoop-policy'],
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/hadoop-policy.xml":
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'core-site') {
-      configgenerator::configfile{'core-site': 
-        modulespath => $hdp-hadoop::params::conf_dir,
-        filename => 'core-site.xml',
-        module => 'hdp-hadoop',
-        configuration => $configuration['core-site'],
-        owner => $hdp-hadoop::params::hdfs_user,
-        group => $hdp::params::user_group
-      }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/core-site.xml":
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'mapred-site') {
-    configgenerator::configfile{'mapred-site': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'mapred-site.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['mapred-site'],
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/mapred-site.xml":
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  $task_log4j_properties_location = "${hdp-hadoop::params::conf_dir}/task-log4j.properties"
-  
-  file { $task_log4j_properties_location:
-    owner   => $hdp-hadoop::params::mapred_user,
-    group   => $hdp::params::user_group,
-    mode    => 644,
-    ensure  => present,
-    source  => "puppet:///modules/hdp-hadoop/task-log4j.properties",
-    replace => false
-  }
-
-  if has_key($configuration, 'capacity-scheduler') {
-    configgenerator::configfile{'capacity-scheduler':
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'capacity-scheduler.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['capacity-scheduler'],
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group,
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/capacity-scheduler.xml":
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  } 
-
-
-  if has_key($configuration, 'hdfs-site') {
-    configgenerator::configfile{'hdfs-site': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'hdfs-site.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['hdfs-site'],
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/hdfs-site.xml":
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'hdfs-exclude-file') {
-    hdp-hadoop::hdfs::generate_exclude_file{'exclude_file':}
-  }
-
-  hdp::package {'ambari-log4j':
-    package_type  => 'ambari-log4j'
-  }
-
-  file { '/usr/lib/hadoop/lib/hadoop-tools.jar':
-    ensure => 'link',
-    target => '/usr/lib/hadoop/hadoop-tools.jar',
-    mode => 755,
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/configuration.xsl":
-    owner => $hdp-hadoop::params::hdfs_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/fair-scheduler.xml":
-    owner => $hdp-hadoop::params::mapred_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/masters":
-    owner => $hdp-hadoop::params::hdfs_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/ssl-client.xml.example":
-    owner => $hdp-hadoop::params::mapred_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/ssl-server.xml.example":
-    owner => $hdp-hadoop::params::mapred_user,
-    group => $hdp::params::user_group
-  }
-
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-    if (hdp_is_empty($configuration) == false and hdp_is_empty($configuration['hdfs-site']) == false) {
-      if (hdp_is_empty($configuration['hdfs-site']['dfs.hosts.exclude']) == false) and
-         (hdp_is_empty($configuration['hdfs-exclude-file']) or
-          has_key($configuration['hdfs-exclude-file'], 'datanodes') == false) {
-        $exlude_file_path = $configuration['hdfs-site']['dfs.hosts.exclude']
-        file { $exlude_file_path :
-        ensure => present,
-        owner => $hdp-hadoop::params::hdfs_user,
-        group => $hdp::params::user_group
-        }
-      }
-      if (hdp_is_empty($hdp::params::slave_hosts) == false and hdp_is_empty($configuration['hdfs-site']['dfs.hosts']) == false) {
-        $include_file_path = $configuration['hdfs-site']['dfs.hosts']
-        $include_hosts_list = $hdp::params::slave_hosts
-        file { $include_file_path :
-        ensure => present,
-        owner => $hdp-hadoop::params::hdfs_user,
-        group => $hdp::params::user_group,
-        content => template('hdp-hadoop/include_hosts_list.erb')
-        }
-      }
-    }
-  }
-
-}
-
-class hdp-hadoop(
-  $service_state
-)
-{
-  include hdp-hadoop::params
-  $hadoop_config_dir = $hdp-hadoop::params::conf_dir
-  $mapred_user = $hdp-hadoop::params::mapred_user  
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-  $hadoop_tmp_dir = $hdp-hadoop::params::hadoop_tmp_dir
-
-  anchor{'hdp-hadoop::begin':} 
-  anchor{'hdp-hadoop::end':} 
-
-  if ($service_state=='uninstalled') {
-    hdp-hadoop::package { 'hadoop':
-      ensure => 'uninstalled'
-    }
-
-    hdp::directory_recursive_create { $hadoop_config_dir:
-      service_state => $::service_state,
-      force => true
-    }
-
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> -> Hdp::Directory_recursive_create[$hadoop_config_dir] -> Anchor['hdp-hadoop::end']
-  } else {
-    
-    hdp-hadoop::package { 'hadoop':}
-
-    #Replace limits config file
-    hdp::configfile {"${hdp::params::limits_conf_dir}/hdfs.conf":
-      component => 'hadoop',
-      owner => 'root',
-      group => 'root',
-      require => Hdp-hadoop::Package['hadoop'],
-      before  => Anchor['hdp-hadoop::end'],
-      mode => 644    
-    }
-
-    hdp::directory_recursive_create { $hadoop_config_dir:
-      service_state => $::service_state,
-      force => true,
-      owner => 'root',
-      group => 'root'
-    }
- 
-    hdp::user{ 'hdfs_user':
-      user_name => $hdfs_user,
-      groups => [$hdp::params::user_group]
-    }
-    
-    hdp::user { 'mapred_user':
-      user_name => $mapred_user,
-      groups => [$hdp::params::user_group]
-    }
-
-    $logdirprefix = $hdp-hadoop::params::hdfs_log_dir_prefix
-    hdp::directory_recursive_create { $logdirprefix: 
-        owner => 'root'
-    }
-    $piddirprefix = $hdp-hadoop::params::hadoop_pid_dir_prefix
-    hdp::directory_recursive_create { $piddirprefix: 
-        owner => 'root'
-    }
-
-    $dfs_domain_socket_path_dir = hdp_get_directory_from_filepath($hdp-hadoop::params::dfs_domain_socket_path)
-    hdp::directory_recursive_create { $dfs_domain_socket_path_dir:
-      owner => $hdfs_user,
-      group => $hdp::params::user_group,
-      mode  => '0644'
-    }
- 
-    #taskcontroller.cfg properties conditional on security
-    if ($hdp::params::security_enabled == true) {
-      file { "${hdp::params::hadoop_bin}/task-controller":
-        owner   => 'root',
-        group   => $hdp-hadoop::params::mapred_tt_group,
-        mode    => '6050',
-        require => Hdp-hadoop::Package['hadoop'],
-        before  => Anchor['hdp-hadoop::end']
-      }
-      $tc_owner = 'root'
-      $tc_mode = '0644'
-    } else {
-      $tc_owner = $hdfs_user
-      $tc_mode = undef
-    }
-    hdp-hadoop::configfile { 'taskcontroller.cfg' :
-      tag   => 'common',
-      owner => $tc_owner,
-      mode  => $tc_mode
-    }
-
-    $template_files = [ 'hadoop-env.sh', 'commons-logging.properties', 'slaves']
-    hdp-hadoop::configfile { $template_files:
-      tag   => 'common', 
-      owner => $hdfs_user
-    }
-
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      hdp-hadoop::configfile { 'health_check' :
-        tag   => 'common',
-        owner => $hdfs_user,
-        template_tag => 'v2'
-      }
-    } else {
-      hdp-hadoop::configfile { 'health_check' :
-        tag   => 'common',
-        owner => $hdfs_user
-      }
-    }
-
-    # log4j.properties has to be installed just one time so that
-    # manual changes are not overwritten
-    if ($service_state=='installed_and_configured') {
-      hdp-hadoop::configfile { 'log4j.properties' :
-        tag   => 'common',
-        owner => $hdfs_user,
-      }
-    }
-
-    # updating log4j.properties with data which is sent from server
-    hdp-hadoop::update-log4j-properties { 'log4j.properties': }
-    
-    hdp-hadoop::configfile { 'hadoop-metrics2.properties' : 
-      tag   => 'common', 
-      owner => $hdfs_user,
-    }
-
-    # Copy database drivers for rca enablement
-    $server_db_name = $hdp::params::server_db_name
-    $hadoop_lib_home = $hdp::params::hadoop_lib_home
-    $db_driver_filename = $hdp::params::db_driver_file
-    $oracle_driver_url = $hdp::params::oracle_jdbc_url
-    $mysql_driver_url = $hdp::params::mysql_jdbc_url
-
-    if ($server_db_name == 'oracle' and $oracle_driver_url != "") {
-      $db_driver_dload_cmd = "curl -kf --retry 5 $oracle_driver_url -o ${hadoop_lib_home}/${db_driver_filename}"
-    } elsif ($server_db_name == 'mysql' and $mysql_driver_url != "") {
-      $db_driver_dload_cmd = "curl -kf --retry 5 $mysql_driver_url -o ${hadoop_lib_home}/${db_driver_filename}"
-    }
-    if ($db_driver_dload_cmd != undef) {
-      exec { "${db_driver_dload_cmd}":
-        command => $db_driver_dload_cmd,
-        unless  => "test -e ${hadoop_lib_home}/${db_driver_filename}",
-        creates => "${hadoop_lib_home}/${db_driver_filename}",
-        path    => ["/bin","/usr/bin/"],
-        require => Hdp-hadoop::Package['hadoop'],
-        before  => Anchor['hdp-hadoop::end']
-      }
-    }
-
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      hdp::directory_recursive_create { "$hadoop_tmp_dir":
-        service_state => $service_state,
-        force => true,
-        owner => $hdfs_user
-      }
-    }
-
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> ->  Hdp::User<|title == $hdfs_user or title == $mapred_user|>  ->
-      Hdp::Directory_recursive_create[$hadoop_config_dir] -> Hdp-hadoop::Configfile<|tag == 'common'|> -> Hdp-hadoop::Update-log4j-properties['log4j.properties'] ->
-      Hdp::Directory_recursive_create[$logdirprefix] -> Hdp::Directory_recursive_create[$piddirprefix] -> Hdp::Directory_recursive_create["$hadoop_tmp_dir"] -> Anchor['hdp-hadoop::end']
-    } else {
-      Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> ->  Hdp::User<|title == $hdfs_user or title == $mapred_user|>  ->
-      Hdp::Directory_recursive_create[$hadoop_config_dir] -> Hdp-hadoop::Configfile<|tag == 'common'|> -> Hdp-hadoop::Update-log4j-properties['log4j.properties'] ->
-      Hdp::Directory_recursive_create[$logdirprefix] -> Hdp::Directory_recursive_create[$piddirprefix] -> Anchor['hdp-hadoop::end']
-    }
-
-  }
-}
-
-class hdp-hadoop::enable-ganglia()
-{
-  Hdp-hadoop::Configfile<|title  == 'hadoop-metrics2.properties'|>{template_tag => 'GANGLIA'}
-}
-
-###config file helper
-define hdp-hadoop::configfile(
-  $owner = undef,
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir,
-  $mode = undef,
-  $namenode_host = undef,
-  $jtnode_host = undef,
-  $snamenode_host = undef,
-  $template_tag = undef,
-  $size = undef, #TODO: deprecate
-  $sizes = []
-) 
-{
-  #TODO: may need to be fixed 
-  if ($jtnode_host == undef) {
-    $calc_jtnode_host = $namenode_host
-  } else {
-    $calc_jtnode_host = $jtnode_host 
-  }
- 
-  #only set 32 if there is a 32-bit component and no 64-bit components
-  if (64 in $sizes) {
-    $common_size = 64
-  } elsif (32 in $sizes) {
-    $common_size = 32
-  } else {
-    $common_size = 64
-  }
-  
-  hdp::configfile { "${hadoop_conf_dir}/${name}":
-    component      => 'hadoop',
-    owner          => $owner,
-    mode           => $mode,
-    namenode_host  => $namenode_host,
-    snamenode_host => $snamenode_host,
-    jtnode_host    => $calc_jtnode_host,
-    template_tag   => $template_tag,
-    size           => $common_size
-  }
-}
-
-#####
-define hdp-hadoop::exec-hadoop(
-  $command,
-  $unless = undef,
-  $refreshonly = undef,
-  $echo_yes = false,
-  $kinit_override = false,
-  $tries = 1,
-  $timeout = 900,
-  $try_sleep = undef,
-  $user = undef,
-  $logoutput = undef,
-  $onlyif = undef,
-  $path = undef
-)
-{
-  include hdp-hadoop::params
-  $security_enabled = $hdp::params::security_enabled
-  $conf_dir = $hdp-hadoop::params::conf_dir
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-  $hbase_user = $hdp-hadoop::params::hbase_user
-
-  if ($user == undef) {
-    $run_user = $hdfs_user
-  } else {
-    $run_user = $user
-  }
-
-  if (($security_enabled == true) and ($kinit_override == false)) {
-    if ($run_user in [$hdfs_user,'root']) {
-      $keytab = $hdp::params::hdfs_user_keytab
-      $principal = $hdfs_user
-    } elsif ($run_user in [$hbase_user]) {
-      $keytab = $hdp::params::hbase_user_keytab
-      $principal = $hbase_user
-    } else {
-      $keytab = $hdp::params::smokeuser_keytab
-      $principal = $hdp::params::smokeuser
-    }
-    $kinit_if_needed = "su - ${run_user} -c '${hdp::params::kinit_path_local} -kt ${keytab} ${principal}'"
-  } else {
-    $kinit_if_needed = ""
-  }
-  
-  if ($path == undef) {
-    if ($echo_yes == true) {
-      $cmd = "yes Y | hadoop --config ${conf_dir} ${command}"
-    } else {
-      $cmd = "hadoop --config ${conf_dir} ${command}"
-    }
-  } else {
-    $cmd = "${path} ${command}"
-  }
-  
-  if ($kinit_if_needed != "") {
-    exec { "kinit_before_${cmd}":
-      command => $kinit_if_needed,
-      path => ['/bin'],
-      before => Hdp::Exec[$cmd]
-    }
-  }
-
-  hdp::exec { $cmd:
-    command     => $cmd,
-    user        => $run_user,
-    unless      => $unless,
-    refreshonly => $refreshonly,
-    tries       => $tries,
-    timeout     => $timeout,
-    try_sleep   => $try_sleep,
-    logoutput   => $logoutput,
-    onlyif      => $onlyif,
-  }
-}
-
-#####
-define hdp-hadoop::update-log4j-properties(
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir
-)
-{
-  $properties = [
-    { name => 'ambari.jobhistory.database', value => $hdp-hadoop::params::ambari_db_rca_url },
-    { name => 'ambari.jobhistory.driver', value => $hdp-hadoop::params::ambari_db_rca_driver },
-    { name => 'ambari.jobhistory.user', value => $hdp-hadoop::params::ambari_db_rca_username },
-    { name => 'ambari.jobhistory.password', value => $hdp-hadoop::params::ambari_db_rca_password },
-    { name => 'ambari.jobhistory.logger', value => 'DEBUG,JHA' },
-
-    { name => 'log4j.appender.JHA', value => 'org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender' },
-    { name => 'log4j.appender.JHA.database', value => '${ambari.jobhistory.database}' },
-    { name => 'log4j.appender.JHA.driver', value => '${ambari.jobhistory.driver}' },
-    { name => 'log4j.appender.JHA.user', value => '${ambari.jobhistory.user}' },
-    { name => 'log4j.appender.JHA.password', value => '${ambari.jobhistory.password}' },
-
-    { name => 'log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger', value => '${ambari.jobhistory.logger}' },
-    { name => 'log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger', value => 'true' }
-  ]
-  hdp-hadoop::update-log4j-property { $properties :
-    log4j_file      => $name,
-    hadoop_conf_dir => $hadoop_conf_dir
-  }
-}
-
-#####
-define hdp-hadoop::update-log4j-property(
-  $log4j_file,
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir
-)
-{
-  hdp::exec{ "sed -i 's~\\(${hdp-hadoop::params::rca_disabled_prefix}\\)\\?${name[name]}=.*~${hdp-hadoop::params::rca_prefix}${name[name]}=${name[value]}~' ${hadoop_conf_dir}/${log4j_file}":
-    command => "sed -i 's~\\(${hdp-hadoop::params::rca_disabled_prefix}\\)\\?${name[name]}=.*~${hdp-hadoop::params::rca_prefix}${name[name]}=${name[value]}~' ${hadoop_conf_dir}/${log4j_file}"
-  }
-}
\ No newline at end of file
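
For orientation: the deleted hdp-hadoop::exec-hadoop define above is the workhorse
behind every service check in this patch. On secure clusters it first runs kinit as
the effective user, picking a keytab by role, then invokes the hadoop CLI against the
cluster conf dir. A minimal Python sketch of that pattern (function name, defaults
and paths are illustrative only, not Ambari's actual replacement API):

    import subprocess

    def exec_hadoop(command, user, conf_dir="/etc/hadoop/conf",
                    security_enabled=False, keytab=None, principal=None,
                    kinit_path="/usr/bin/kinit"):
        """Run a hadoop CLI command as `user`, authenticating first if needed."""
        if security_enabled and keytab and principal:
            # Obtain a Kerberos ticket for the run user before touching HDFS,
            # mirroring the $kinit_if_needed exec above.
            subprocess.check_call(
                ["su", "-", user,
                 "-c", "%s -kt %s %s" % (kinit_path, keytab, principal)])
        # Equivalent of: hadoop --config <conf_dir> <command>
        subprocess.check_call(
            ["su", "-", user, "-c", "hadoop --config %s %s" % (conf_dir, command)])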

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp
deleted file mode 100644
index 23503da..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp
+++ /dev/null
@@ -1,96 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::jobtracker(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::jobtracker'] = true
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-    $mapred_user = $hdp-hadoop::params::mapred_user
-    $mapred_local_dir = $hdp-hadoop::params::mapred_local_dir 
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'jobtracker_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/jt.service.keytab",
-        keytabfile => 'jt.service.keytab',
-        owner => $hdp-hadoop::params::mapred_user
-      }
-    }
-     
-    hdp-hadoop::jobtracker::create_local_dirs { $mapred_local_dir: 
-      service_state => $service_state
-    }
-
-    #TODO: cleanup 
-    Hdp-Hadoop::Configfile<||>{jtnode_host => $hdp::params::host_address}
-
-    #TODO: do we keep precondition here?
-    if ($service_state == 'running' and $hdp-hadoop::params::use_preconditions == true) {
-      class { 'hdp-hadoop::hdfs::service_check':
-        before => Hdp-hadoop::Service['jobtracker'],
-        require => Class['hdp-hadoop']
-      }
-    }
-
-    hdp-hadoop::service{ 'jobtracker':
-      ensure       => $service_state,
-      user         => $mapred_user
-    }
-  
-    hdp-hadoop::service{ 'historyserver':
-      ensure         => $service_state,
-      user           => $mapred_user,
-      create_pid_dir => false,
-      create_log_dir => false
-    }
-
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Service['jobtracker'] -> Hdp-hadoop::Service['historyserver'] 
-    -> Anchor['hdp-hadoop::end']
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Jobtracker::Create_local_dirs<||> -> Hdp-hadoop::Service['jobtracker'] 
-    -> Anchor['hdp-hadoop::end']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::jobtracker::create_local_dirs($service_state)
-{
-    $dirs = hdp_array_from_comma_list($name)
-    hdp::directory_recursive_create { $dirs :
-      owner => $hdp-hadoop::params::mapred_user,
-      mode => '0755',
-      service_state => $service_state,
-      force => true
-    }
-}
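
The create_local_dirs define above takes its directory list from the resource title,
a comma-separated string, expands it with hdp_array_from_comma_list, and creates each
path recursively for the mapred user. The same expansion as a hedged Python sketch
(ownership handling simplified; names illustrative):

    import os
    import pwd

    def create_local_dirs(comma_list, owner="mapred", mode=0o755):
        """Expand a comma-separated dir list and create each path recursively."""
        uid = pwd.getpwnam(owner).pw_uid
        for path in (p.strip() for p in comma_list.split(",") if p.strip()):
            if not os.path.isdir(path):   # 'force => true' analogue: idempotent
                os.makedirs(path)
            os.chmod(path, mode)
            os.chown(path, uid, -1)       # leave the group unchanged

    # create_local_dirs("/hadoop/mapred,/grid1/hadoop/mapred")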

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp
deleted file mode 100644
index af5e095..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::jobtracker::service_check()
-{
-  hdp-hadoop::exec-hadoop { 'jobtracker::service_check':
-    command   => 'job -list',
-    tries     => 3,
-    try_sleep => 5,
-    user => $hdp::params::smokeuser
-  }
-}
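
The deleted service check is just `hadoop job -list` retried as the smoke user: up to
three attempts, five seconds apart. The tries/try_sleep semantics of hdp::exec reduce
to a loop like this sketch:

    import subprocess
    import time

    def exec_with_retries(cmd, tries=3, try_sleep=5):
        """Retry a shell command, mirroring hdp::exec's tries/try_sleep knobs."""
        for attempt in range(1, tries + 1):
            if subprocess.call(cmd, shell=True) == 0:
                return
            if attempt < tries:
                time.sleep(try_sleep)
        raise RuntimeError("failed after %d tries: %s" % (tries, cmd))

    # exec_with_retries("hadoop --config /etc/hadoop/conf job -list")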

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/journalnode.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/journalnode.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/journalnode.pp
deleted file mode 100644
index f45c684..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/journalnode.pp
+++ /dev/null
@@ -1,60 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::journalnode(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params 
-{
-
-  $hdp::params::service_exists['hdp-hadoop::journalnode'] = true
-  
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-  
-  $jn_edits_dir = $hdp-hadoop::params::jn_edits_dir
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-    
-    hdp::directory_recursive_create{ $jn_edits_dir:
-      service_state => $service_state,
-      force => true,
-      owner => $hdfs_user
-    }
-      
-    hdp-hadoop::service{ 'journalnode':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => true,
-      create_log_dir => true
-    }
-    
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$jn_edits_dir] -> Hdp-hadoop::Service['journalnode'] -> Anchor['hdp-hadoop::end'] 
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp
deleted file mode 100644
index df4ba7b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp
+++ /dev/null
@@ -1,75 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::mapred::service_check() 
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $jar_location = $hdp::params::hadoop_jar_location
-  $input_file = 'mapredsmokeinput'
-  $output_file = "mapredsmokeoutput"
-
-  $cleanup_cmd = "dfs -rmr ${output_file} ${input_file}"
-  #cleanup is put below to handle retries; if retrying, there will be a stale file that needs cleanup; the exit code is a function of the second command
-  $create_file_cmd = "$cleanup_cmd ; hadoop dfs -put /etc/passwd ${input_file} " #TODO: inconsistent that the second command needs the hadoop prefix
-  $test_cmd = "fs -test -e ${output_file}" 
-  $run_wordcount_job = "jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}"
-  
-  anchor { 'hdp-hadoop::mapred::service_check::begin':}
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::create_file':
-    command   => $create_file_cmd,
-    tries     => 1,
-    try_sleep => 5,
-    require   => Anchor['hdp-hadoop::mapred::service_check::begin'],
-  #  notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
-    user      => $smoke_test_user
-  }
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::run_wordcount':
-    command   => $run_wordcount_job,
-    tries     => 1,
-    try_sleep => 5,
-    require   => Hdp-hadoop::Exec-hadoop['mapred::service_check::create_file'],
-    notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::test'],
-    user      => $smoke_test_user,
-    logoutput => "true"
-  }
-
-#  exec { 'runjob':
-#    command   => "hadoop jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}",
-#    tries     => 1,
-#    try_sleep => 5,
-#    require   => Hdp-hadoop::Exec-hadoop['mapred::service_check::create_file'],
-#    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-#    notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::test'],
-#    logoutput => "true",
-#    user      => $smoke_test_user
-#  }
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Hdp-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
-    before      => Anchor['hdp-hadoop::mapred::service_check::end'], #TODO: remove after testing
-    user        => $smoke_test_user
-  }
-  
-  anchor{ 'hdp-hadoop::mapred::service_check::end':}
-}
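
The MapReduce smoke test above is a four-step pipeline: remove any stale files left
by an earlier retry, upload /etc/passwd as input, run the example wordcount job, and
assert that the output directory exists. Written imperatively (jar path illustrative),
it reads roughly as:

    import subprocess

    def hadoop(cmd):
        subprocess.check_call("hadoop " + cmd, shell=True)

    def mapred_service_check(jar="/usr/lib/hadoop/hadoop-examples.jar"):
        inp, out = "mapredsmokeinput", "mapredsmokeoutput"
        # Cleanup may fail on a fresh cluster; only the later exit codes matter.
        subprocess.call("hadoop dfs -rmr %s %s" % (out, inp), shell=True)
        hadoop("dfs -put /etc/passwd " + inp)                 # create_file
        hadoop("jar %s wordcount %s %s" % (jar, inp, out))    # run_wordcount
        hadoop("fs -test -e " + out)                          # test: non-zero exit fails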

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp
deleted file mode 100644
index d0fc226..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp
+++ /dev/null
@@ -1,285 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::namenode(
-  $service_state = $hdp::params::cluster_service_state,
-  $slave_hosts = [],
-  $format = true,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::namenode'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-    $dfs_name_dir = $hdp-hadoop::params::dfs_name_dir
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and 
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'namenode_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/nn.service.keytab",
-        keytabfile => 'nn.service.keytab',
-        owner => $hdp-hadoop::params::hdfs_user
-      }
-      hdp::download_keytab { 'namenode_hdfs_headless_keytab' :   
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/hdfs.headless.keytab",
-        keytabfile => 'hdfs.headless.keytab', 
-        owner => $hdp-hadoop::params::hdfs_user, 
-        hostnameInPrincipals => 'no'
-      }
-      hdp::download_keytab { 'namenode_spnego_keytab' :   
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/spnego.service.keytab",
-        keytabfile => 'spnego.service.keytab', 
-        owner => $hdp-hadoop::params::hdfs_user, 
-        mode => '0440',
-        group => $hdp::params::user_group
-      }
-    }
-
-    hdp-hadoop::namenode::create_name_dirs { $dfs_name_dir: 
-      service_state => $service_state
-    }
-   
-    Hdp-Hadoop::Configfile<||>{namenode_host => $hdp::params::host_address}
-    Hdp::Configfile<||>{namenode_host => $hdp::params::host_address} #for components other than hadoop (e.g., hbase) 
-  
-    if ($service_state == 'running' and $format == true) {
-      class {'hdp-hadoop::namenode::format' : }
-    }
-
-    hdp-hadoop::service{ 'namenode':
-      ensure       => $service_state,
-      user         => $hdp-hadoop::params::hdfs_user,
-      initial_wait => hdp_option_value($opts,'wait')
-    }
-
-    hdp-hadoop::namenode::create_app_directories { 'create_app_directories' :
-      service_state => $service_state
-    }
-
-    hdp-hadoop::namenode::create_user_directories { 'create_user_directories' :
-      service_state => $service_state
-    }
-
-    Anchor['hdp-hadoop::begin'] ->
-    Hdp-hadoop::Namenode::Create_name_dirs<||> ->
-    Hdp-hadoop::Service['namenode'] ->
-    Hdp-hadoop::Namenode::Create_app_directories<||> ->
-    Hdp-hadoop::Namenode::Create_user_directories<||> ->
-    Anchor['hdp-hadoop::end']
-
-    if ($service_state == 'running' and $format == true) {
-      Anchor['hdp-hadoop::begin'] ->
-      Hdp-hadoop::Namenode::Create_name_dirs<||> ->
-      Class['hdp-hadoop::namenode::format'] ->
-      Hdp-hadoop::Service['namenode'] ->
-      Anchor['hdp-hadoop::end']
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::namenode::create_name_dirs($service_state)
-{
-  $dirs = hdp_array_from_comma_list($name)
-  hdp::directory_recursive_create { $dirs :
-    owner => $hdp-hadoop::params::hdfs_user,
-    mode => '0755',
-    service_state => $service_state,
-    force => true
-  }
-}
-
-define hdp-hadoop::namenode::create_app_directories($service_state)
-{
-
-  if ($service_state == 'running') {
-   
-    hdp-hadoop::hdfs::directory{ "/tmp" :
-      service_state => $service_state,
-      owner => $hdp-hadoop::params::hdfs_user,
-      mode => '777'
-    }
-
-    hdp-hadoop::hdfs::directory{ '/mapred' :
-      service_state => $service_state,
-      owner         => $hdp-hadoop::params::mapred_user
-    }
-
-    hdp-hadoop::hdfs::directory{ '/mapred/system' :
-      service_state => $service_state,
-      owner         => $hdp-hadoop::params::mapred_user
-    }
-
-    Hdp-hadoop::Hdfs::Directory['/mapred'] -> Hdp-hadoop::Hdfs::Directory['/mapred/system']
-
-    if ($hdp::params::hbase_master_hosts != "") {
-
-      hdp-hadoop::hdfs::directory { $hdp-hadoop::params::hdfs_root_dir:
-        owner         => $hdp::params::hbase_user,
-        service_state => $service_state
-      }
-
-      $hbase_staging_dir = $hdp::params::hbase_staging_dir
-      hdp-hadoop::hdfs::directory { $hbase_staging_dir:
-        owner         => $hdp::params::hbase_user,
-        service_state => $service_state,
-        mode             => '711'
-      }
-    }
-
-    if ($hdp::params::hive_server_host != "") {
-      $hive_user = $hdp::params::hive_user
-      $hive_apps_whs_dir = $hdp::params::hive_apps_whs_dir
-
-      hdp-hadoop::hdfs::directory{ $hive_apps_whs_dir:
-        service_state   => $service_state,
-        owner            => $hive_user,
-        mode             => '777',
-        recursive_chmod  => true
-      }
-    }
-
-    if ($hdp::params::webhcat_server_host != "") {
-      $webhcat_user = $hdp::params::webhcat_user
-      $webhcat_apps_dir = hdp_get_directory_from_filepath(hdp_get_dir_from_url(hdp_default("webhcat-site/templeton.streaming.jar",""), "/apps/webhcat"))
-
-      hdp-hadoop::hdfs::directory{ $webhcat_apps_dir:
-        service_state => $service_state,
-        owner => $webhcat_user,
-        mode  => '755',
-        recursive_chmod => true
-      }
-    }
-
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      if ($hdp::params::nm_hosts != "") {
-        if ($hdp::params::yarn_log_aggregation_enabled == "true") {
-          $yarn_user = $hdp::params::yarn_user
-          $yarn_nm_app_log_dir = $hdp::params::yarn_nm_app_log_dir
-
-          hdp-hadoop::hdfs::directory{ $yarn_nm_app_log_dir:
-            service_state => $service_state,
-            owner => $yarn_user,
-            group => $hdp::params::user_group,
-            mode  => '1777',
-            recursive_chmod => true
-          }
-        }
-      }
-
-
-      if ($hdp::params::hs_host != "") {
-        $mapred_user = $hdp::params::mapred_user
-        $mapreduce_jobhistory_intermediate_done_dir = $hdp::params::mapreduce_jobhistory_intermediate_done_dir
-        $group = $hdp::params::user_group
-        $mapreduce_jobhistory_done_dir = $hdp::params::mapreduce_jobhistory_done_dir
-
-        hdp-hadoop::hdfs::directory{ $mapreduce_jobhistory_intermediate_done_dir:
-          service_state => $service_state,
-          owner => $mapred_user,
-          group => $group,
-          mode  => '1777'
-        }
-
-        hdp-hadoop::hdfs::directory{ $mapreduce_jobhistory_done_dir:
-          service_state => $service_state,
-          owner => $mapred_user,
-          group => $group,
-          mode  => '1777'
-        }
-      }
-    }
-  }
-}
-
-
-define hdp-hadoop::namenode::create_user_directories($service_state)
-{
-  if ($service_state == 'running') {
-    $smoke_hdfs_user_dir = $hdp::params::smoke_hdfs_user_dir
-
-    $smoke_user_dir_item="$smoke_hdfs_user_dir,"
-
-    if ($hdp::params::hive_server_host != "") {
-      $hive_hdfs_user_dir = $hdp::params::hive_hdfs_user_dir
-      $hive_dir_item="$hive_hdfs_user_dir,"
-    } else {
-      $hive_dir_item=""
-    }
-
-    if ($hdp::params::oozie_server != "") {
-      $oozie_hdfs_user_dir = $hdp::params::oozie_hdfs_user_dir
-      $oozie_dir_item="$oozie_hdfs_user_dir,"
-    } else {
-      $oozie_dir_item=""
-    }
-    
-    if ($hdp::params::webhcat_server_host != "") {
-      $hcat_hdfs_user_dir = $hdp::params::hcat_hdfs_user_dir
-      $webhcat_hdfs_user_dir = $hdp::params::webhcat_hdfs_user_dir
-      $webhcat_dir_item="$webhcat_hdfs_user_dir,"
-      if ($hcat_hdfs_user_dir != $webhcat_hdfs_user_dir) {
-        $hcat_dir_item="$hcat_hdfs_user_dir,"
-      } else {
-        $hcat_dir_item=""
-      }
-    } else {
-      $webhcat_dir_item=""
-    }
-
-    $users_dir_list_comm_sep = "$smoke_user_dir_item $hive_dir_item $oozie_dir_item $hcat_dir_item $webhcat_dir_item"
-
-    #Get unique users directories set
-    $users_dirs_set = hdp_set_from_comma_list($users_dir_list_comm_sep)
-
-    hdp-hadoop::namenode::create_user_directory{ $users_dirs_set:
-      service_state => $service_state
-    }
-  }
-  
-}
-
-define hdp-hadoop::namenode::create_user_directory($service_state)
-{
-  
-  $owner = hdp_hadoop_get_owner($name)
-  $mode = hdp_hadoop_get_mode($name)
-  debug("## Creating user directory: $name, owner: $owner, mode: $mode")
-  hdp-hadoop::hdfs::directory{ $name:
-   service_state   => $service_state,
-   mode            => $mode,
-   owner           => $owner,
-   recursive_chmod => true
-  }
-}
-
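
create_user_directories above builds one comma-separated string of per-service HDFS
home directories and dedupes it with hdp_set_from_comma_list, since hcat and webhcat
may share a directory. The string juggling is easier to see as a Python sketch:

    def unique_user_dirs(smoke_dir, hive_dir=None, oozie_dir=None,
                         hcat_dir=None, webhcat_dir=None):
        """Collect per-service HDFS user dirs, skipping services that are not
        deployed and collapsing duplicates (hcat and webhcat may share one)."""
        seen, result = set(), []
        for d in (smoke_dir, hive_dir, oozie_dir, hcat_dir, webhcat_dir):
            if d and d not in seen:
                seen.add(d)
                result.append(d)
        return result

    # unique_user_dirs("/user/ambari-qa",
    #                  hcat_dir="/user/hcat", webhcat_dir="/user/hcat")
    # -> ['/user/ambari-qa', '/user/hcat']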

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp
deleted file mode 100644
index fb9d2ab..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::namenode::format(
-  $force = false
-)
-{
-  $mark_dir = $hdp-hadoop::params::namenode_formatted_mark_dir
-  $dfs_name_dir = $hdp-hadoop::params::dfs_name_dir
-  $hdfs_user = $hdp::params::hdfs_user
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir
-
-  # Avoid formatting standby namenode in a HA cluster
-  if ($hdp::params::dfs_ha_enabled == false) {
-    if ($force == true) {
-        hdp-hadoop::exec-hadoop { 'namenode -format' :
-        command => 'namenode -format',
-        kinit_override => true,
-        notify  => Hdp::Exec['set namenode mark']
-      }
-    } else {
-
-      file { '/tmp/checkForFormat.sh':
-        ensure => present,
-        source => "puppet:///modules/hdp-hadoop/checkForFormat.sh",
-        mode => '0755'
-      }
-
-      exec { '/tmp/checkForFormat.sh':
-        command   => "sh /tmp/checkForFormat.sh ${hdfs_user} ${hadoop_conf_dir} ${mark_dir} ${dfs_name_dir} ",
-        unless   => "test -d ${mark_dir}",
-        require   => File['/tmp/checkForFormat.sh'],
-        path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-        logoutput => "true",
-        notify   => Hdp::Exec['set namenode mark']
-      }
-    }
-
-    hdp::exec { 'set namenode mark' :
-      command     => "mkdir -p ${mark_dir}",
-      refreshonly => true
-    }
-  }
-}
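
The format class above guards `namenode -format` with a marker directory so a
NameNode is never re-formatted on restart: run the format (or the check script) only
when the marker is absent, and create the marker afterwards. A condensed sketch of
the same idempotency guard (the marker path is whatever namenode_formatted_mark_dir
resolves to, which is not shown here):

    import os
    import subprocess

    def format_namenode_once(mark_dir):
        """Format HDFS at most once per host, using a marker dir as the guard."""
        if os.path.isdir(mark_dir):
            return  # already formatted here; never format twice
        subprocess.check_call(
            "su - hdfs -c 'hadoop --config /etc/hadoop/conf namenode -format'",
            shell=True)
        os.makedirs(mark_dir)  # the 'set namenode mark' step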

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp
deleted file mode 100644
index d4c0523..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::namenode::service_check()
-{
-  hdp-hadoop::exec-hadoop { 'namenode::service_check':
-    command   => 'dfs -ls /',
-    tries     => 3,
-    try_sleep => 5
-  }
-}


http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master-conn.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master-conn.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master-conn.pp
deleted file mode 100644
index f9c5d36..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master-conn.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::master-conn($hbase_master_hosts)
-{
-  Hdp-Hbase::Configfile<||>{hbase_master_hosts => $hbase_master_hosts}
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master.pp
deleted file mode 100644
index c16b1af..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master.pp
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::master(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hbase::params 
-{
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {    
-    $hdp::params::service_exists['hdp-hbase::master'] = true
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-       $masterHost = $kerberos_adminclient_host[0]
-       hdp::download_keytab { 'hbase_master_service_keytab' :
-         masterhost => $masterHost,
-         keytabdst => "${$keytab_path}/hm.service.keytab",
-         keytabfile => 'hm.service.keytab',
-         owner => $hdp::params::hbase_user
-       }
-    }
-  
-    #adds package, users, directories, and common configs
-    class { 'hdp-hbase': 
-      type          => 'master',
-      service_state => $service_state
-    }
-
-    Hdp-hbase::Configfile<||>{hbase_master_hosts => $hdp::params::host_address}
-  
-    hdp-hbase::service{ 'master':
-      ensure => $service_state
-    }
-
-    #top level does not need anchors
-    Class['hdp-hbase'] -> Hdp-hbase::Service['master']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-#assumes that master and regionserver will not be on same machine
-class hdp-hbase::master::enable-ganglia()
-{
-  Hdp-hbase::Configfile<|title  == $metric-prop-file-name |>{template_tag => 'GANGLIA-MASTER'}
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/params.pp
deleted file mode 100644
index 4eb5ad0..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/params.pp
+++ /dev/null
@@ -1,110 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::params() inherits hdp::params 
-{
-  
-  ####### users
-  $hbase_user = $hdp::params::hbase_user
-  
-  ### hbase-env
-  $hadoop_conf_dir = hdp_default("hadoop_conf_dir")
-  $conf_dir = $hdp::params::hbase_conf_dir
-
-  $hbase_log_dir = hdp_default("hbase_log_dir","/var/log/hbase")
-
-  $hbase_master_heapsize = hdp_default("hbase_master_heapsize","1000m")
-
-  $hbase_pid_dir = hdp_default("hbase_pid_dir","/var/run/hbase")
-
-  $hbase_regionserver_heapsize = hdp_default("hbase_regionserver_heapsize","1000m")
-
-  $hbase_regionserver_xmn_size = hdp_calc_xmn_from_xms("$hbase_regionserver_heapsize","0.2","512")
-
-  ### hbase-site.xml
-  $hbase_tmp_dir = hdp_default("hbase-site/hbase.tmp.dir","$hbase_log_dir")
-
-
-  #TODO: check if any of these 'hdfs' vars need to be equated with vars in hdp-hadoop
-  $hdfs_enable_shortcircuit_read = hdp_default("hbase-site/hdfs.enable.shortcircuit.read",true)
-
-  $hdfs_enable_shortcircuit_skipchecksum = hdp_default("hbase-site/hdfs.enable.shortcircuit.skipchecksum",false)
-
-  $hdfs_support_append = hdp_default("hbase-site/hdfs.support.append",true)
-
-  $hfile_blockcache_size = hdp_default("hbase-site/hfile.blockcache.size","0.25")
-
-  $hfile_max_keyvalue_size = hdp_default("hbase-site/hfile.max.keyvalue.size",10485760)
-
-  $zookeeper_sessiontimeout = hdp_default("hbase-site/zookeeper.sessiontimeout",60000)
-
-  $client_scannercaching = hdp_default("hbase-site/client.scannercaching",100)
-
-  $hstore_blockingstorefiles = hdp_default("hbase-site/hstore.blockingstorefiles",7)
-
-  $hstore_compactionthreshold = hdp_default("hbase-site/hstore.compactionthreshold",3)
-
-  $hstorefile_maxsize = hdp_default("hbase-site/hstorefile.maxsize",1073741824)
-
-  $hregion_blockmultiplier = hdp_default("hbase-site/hregion.blockmultiplier",2)
-
-  $hregion_memstoreflushsize = hdp_default("hbase-site/hregion.memstoreflushsize",134217728)
-
-  $regionserver_handlers = hdp_default("hbase-site/regionserver.handlers", 30)
-
-  $hregion_majorcompaction = hdp_default("hbase-site/hregion.majorcompaction", 86400000)
-
-  $preloaded_mastercoprocessor_classes = hdp_default("hbase-site/preloaded.mastercoprocessor.classes")
-
-  $preloaded_regioncoprocessor_classes = hdp_default("hbase-site/preloaded.regioncoprocessor.classes")
-
-  $regionserver_memstore_lab = hdp_default("hbase-site/regionserver.memstore.lab",true)
-
-  $regionserver_memstore_lowerlimit = hdp_default("hbase-site/regionserver.memstore.lowerlimit","0.35")
-
-  $regionserver_memstore_upperlimit = hdp_default("hbase-site/regionserver.memstore.upperlimit","0.4")
-
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
-  $hbase_client_jaas_config_file = hdp_default("hbase_client_jaas_config_file", "${conf_dir}/hbase_client_jaas.conf")
-  $hbase_master_jaas_config_file = hdp_default("hbase_master_jaas_config_file", "${conf_dir}/hbase_master_jaas.conf")
-  $hbase_regionserver_jaas_config_file = hdp_default("hbase_regionserver_jaas_config_file", "${conf_dir}/hbase_regionserver_jaas.conf")
-
-  $hbase_master_keytab_path = hdp_default("hbase-site/hbase.master.keytab.file", "${keytab_path}/hbase.service.keytab")
-  $hbase_master_principal = hdp_default("hbase-site/hbase.master.kerberos.principal", "hbase/_HOST@${kerberos_domain}")
-  $hbase_regionserver_keytab_path = hdp_default("hbase-site/hbase.regionserver.keytab.file", "${keytab_path}/hbase.service.keytab")
-  $hbase_regionserver_principal = hdp_default("hbase-site/hbase.regionserver.kerberos.principal", "hbase/_HOST@${kerberos_domain}")
-
-  $hbase_primary_name = hdp_default("hbase_primary_name", "hbase")
-  $hostname = $hdp::params::hostname
-  if ($use_hostname_in_principal) {
-    $hbase_master_jaas_princ = "${hbase_master_primary_name}/${hostname}@${kerberos_domain}"
-    $hbase_regionserver_jaas_princ = "${hbase_regionserver_primary_name}/${hostname}@${kerberos_domain}"
-  } else {
-    $hbase_master_jaas_princ = "${hbase_master_principal_name}@${kerberos_domain}"
-    $hbase_regionserver_jaas_princ = "${hbase_regionserver_primary_name}@${kerberos_domain}"
-  }
-
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-    $metric-prop-file-name = "hadoop-metrics2-hbase.properties"
-  } else {
-    $metric-prop-file-name = "hadoop-metrics.properties"
-  }
-  $smokeuser_permissions = hdp_default("smokeuser_permissions", "RWXCA")
-}
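
Nearly every value in this params class flows through hdp_default(key, fallback),
which returns the cluster-supplied value for a 'file/property' key when present and
the hard-coded default otherwise. Its contract is essentially a nested-dict lookup;
a sketch with an assumed config shape:

    def hdp_default(config, key, fallback=None):
        """Resolve a 'file/property' key against nested site configs, falling
        back to a hard-coded default (the contract of hdp_default above)."""
        node = config
        for part in key.split("/"):
            if not isinstance(node, dict) or part not in node:
                return fallback
            node = node[part]
        return node

    site = {"hbase-site": {"hfile.blockcache.size": "0.4"}}
    hdp_default(site, "hbase-site/hfile.blockcache.size", "0.25")  # -> "0.4"
    hdp_default(site, "hbase-site/hregion.blockmultiplier", 2)     # -> 2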

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/regionserver.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/regionserver.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/regionserver.pp
deleted file mode 100644
index 434f4f1..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/regionserver.pp
+++ /dev/null
@@ -1,73 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::regionserver(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hbase::params
-{
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {    
-    $hdp::params::service_exists['hdp-hbase::regionserver'] = true       
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-       $masterHost = $kerberos_adminclient_host[0]
-       hdp::download_keytab { 'hbase_rs_service_keytab' :
-         masterhost => $masterHost,
-         keytabdst => "${$keytab_path}/rs.service.keytab",
-         keytabfile => 'rs.service.keytab',
-         owner => $hdp::params::hbase_user
-       }
-    }
-
-    if ($hdp::params::service_exists['hdp-hbase::master'] != true) {
-      #adds package, users, directories, and common configs
-      class { 'hdp-hbase': 
-        type          => 'regionserver',
-        service_state => $service_state
-      } 
-      $create_pid_dir = true
-      $create_conf_dir = true
-    } else {
-      $create_pid_dir = false
-      $create_conf_dir = false
-    }
-
-
-    hdp-hbase::service{ 'regionserver':
-      ensure         => $service_state,
-      create_pid_dir => $create_pid_dir,
-      create_conf_dir => $create_conf_dir
-    }
-
-    #top level does not need anchors
-    Class['hdp-hbase'] ->  Hdp-hbase::Service['regionserver']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-#assumes that master and regionserver will not be on same machine
-class hdp-hbase::regionserver::enable-ganglia()
-{
-  Hdp-hbase::Configfile<|title  == $metric-prop-file-name |>{template_tag => 'GANGLIA-RS'}
-}
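
The regionserver class above turns create_pid_dir/create_conf_dir off when an hbase
master already ran on the same host, so shared directories are declared exactly once.
As a plain predicate (service-registry shape assumed for illustration):

    def regionserver_dir_flags(services_on_host):
        """Let the regionserver declare the shared pid/conf dirs only when no
        hbase master on the same host has already declared them."""
        colocated = "hdp-hbase::master" in services_on_host
        return {"create_pid_dir": not colocated,
                "create_conf_dir": not colocated}

    regionserver_dir_flags({"hdp-hbase::master"})  # -> both False
    regionserver_dir_flags(set())                  # -> both True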

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/service.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/service.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/service.pp
deleted file mode 100644
index 8ab9645..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/service.pp
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp-hbase::service(
-  $ensure = 'running',
-  $create_pid_dir = true,
-  $create_conf_dir = true,
-  $initial_wait = undef)
-{
-  include hdp-hbase::params
-
-  $role = $name
-  $user = $hdp-hbase::params::hbase_user
-
-  $conf_dir = $hdp::params::hbase_conf_dir
-  $hbase_daemon = $hdp::params::hbase_daemon_script
-  $cmd = "$hbase_daemon --config ${conf_dir}"
-  $pid_dir = $hdp-hbase::params::hbase_pid_dir
-  $pid_file = "${pid_dir}/hbase-hbase-${role}.pid"
-  $hbase_log_dir = $hdp-hbase::params::hbase_log_dir
-  $hbase_tmp_dir = $hdp-hbase::params::hbase_tmp_dir
-
-  if ($ensure == 'running') {
-    $daemon_cmd = "su - ${user} -c  '${cmd} start ${role}'"
-    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  } elsif ($ensure == 'stopped') {
-    $daemon_cmd = "su - ${user} -c  '${cmd} stop ${role}' && rm -f ${pid_file}"
-    $no_op_test = undef
-  } else {
-    $daemon_cmd = undef
-  }
-
-  $tag = "hbase_service-${name}"
-  
-  if ($create_pid_dir == true) {
-    hdp::directory_recursive_create { $pid_dir: 
-      owner => $user,
-      tag   => $tag,
-      service_state => $ensure,
-      force => true
-    }
-  }
-  if ($create_conf_dir == true) {
-   # To avoid duplicate resource definitions
-    $hbase_conf_dirs = hdp_set_from_comma_list("${hbase_tmp_dir},${hbase_log_dir}")
-
-    hdp::directory_recursive_create_ignore_failure { $hbase_conf_dirs:
-      owner => $user,
-      context_tag => 'hbase_service',
-      service_state => $ensure,
-      force => true
-    }
-  }
-
-  if ($daemon_cmd != undef) { 
-    hdp::exec { $daemon_cmd:
-      command      => $daemon_cmd,
-      unless       => $no_op_test,
-      initial_wait => $initial_wait
-    }
-    anchor{"hdp-hbase::service::${name}::begin":} -> Hdp::Directory_recursive_create<|tag == $tag|> -> Hdp::Exec[$daemon_cmd] -> anchor{"hdp-hbase::service::${name}::end":}
-  } else {
-    anchor{"hdp-hbase::service::${name}::begin":} -> Hdp::Directory_recursive_create<|tag == $tag|> -> anchor{"hdp-hbase::service::${name}::end":}  
-  }
-}
\ No newline at end of file
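
The no_op_test in this service define is the classic pid-file liveness probe: skip
the start command when the pid file exists and the recorded pid is a live process.
The same check as a Python sketch (pid-file naming follows the manifest's pattern):

    import os

    def hbase_role_running(role, pid_dir="/var/run/hbase"):
        """True if the hbase daemon for `role` is alive: its pid file exists
        and the pid it names is a live process (the no_op_test above)."""
        pid_file = os.path.join(pid_dir, "hbase-hbase-%s.pid" % role)
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 probes existence, like `ps <pid>`
            return True
        except (OSError, IOError, ValueError):
            return False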

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/zk-conn.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/zk-conn.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/zk-conn.pp
deleted file mode 100644
index 6c67cd4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/zk-conn.pp
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::zk-conn(
- $zookeeper_hosts
-)
-{
-  Hdp::Configfile<||>{zookeeper_hosts => $zookeeper_hosts}
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-MASTER.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-MASTER.erb b/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-MASTER.erb
deleted file mode 100644
index f3988d9..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-MASTER.erb
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is an hardcoded-name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-RS.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-RS.erb b/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-RS.erb
deleted file mode 100644
index 386376d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-RS.erb
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is an hardcoded-name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
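
The two hadoop-metrics templates above differ only in the resolved ganglia host and
the gmond port (8663 for the master variant, 8660 for regionservers); the rest is the
same stanza repeated per context. Outside ERB this is plain string substitution; a
sketch with placeholder names invented for illustration:

    from string import Template

    SINK = Template(
        "$ctx.class=org.apache.hadoop.metrics.ganglia.GangliaContext31\n"
        "$ctx.period=10\n"
        "$ctx.servers=$host:$port\n")

    def render_metrics(ganglia_host, role):
        # Masters report to gmond on 8663, regionservers on 8660 (as in the ERBs).
        port = 8663 if role == "master" else 8660
        return "".join(SINK.substitute(ctx=c, host=ganglia_host, port=port)
                       for c in ("hbase", "jvm", "rpc"))

    print(render_metrics("ganglia.example.com", "regionserver"))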

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties.erb b/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties.erb
deleted file mode 100644
index f3988d9..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties.erb
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is an hardcoded-name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.erb b/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.erb
deleted file mode 100644
index 3a05d5e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.erb
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is an hardcoded-name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
-
-#Ganglia following hadoop example
-hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-hbase.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-hbase.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.erb b/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.erb
deleted file mode 100644
index 2d74c16..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.erb
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is an hardcoded-name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
-#Ganglia following hadoop example
-hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-hbase.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-hbase.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
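
Comparing the two metrics2 variants above: the GANGLIA-MASTER and GANGLIA-RS templates are identical except for the Ganglia port (8663 for the master, 8660 for region servers). A minimal sketch folding the two into one parameterized helper; the names are illustrative:

    def ganglia_target(host: str, is_master: bool) -> str:
        # Ports taken from the two deleted templates: master 8663, RS 8660.
        return "%s:%d" % (host, 8663 if is_master else 8660)

    assert ganglia_target("g1.example.com", True) == "g1.example.com:8663"
    assert ganglia_target("g1.example.com", False) == "g1.example.com:8660"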

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-env.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-env.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-env.sh.erb
deleted file mode 100644
index ea19d23..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-env.sh.erb
+++ /dev/null
@@ -1,83 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Set environment variables here.
-
-# The java implementation to use. Java 1.6 required.
-export JAVA_HOME=<%=scope.function_hdp_template_var("::hdp::params::java64_home")%>
-
-# HBase Configuration directory
-export HBASE_CONF_DIR=${HBASE_CONF_DIR:-<%=scope.function_hdp_template_var("hbase_conf_dir")%>}
-
-# Extra Java CLASSPATH elements. Optional.
-export HBASE_CLASSPATH=${HBASE_CLASSPATH}
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HBASE_HEAPSIZE=1000
-
-# Extra Java runtime options.
-# Below are what we set by default. May only work with SUN JVM.
-# For more on why as well as other possible settings,
-# see http://wiki.apache.org/hadoop/PerformanceTuning
-export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hbase_log_dir")%>/hs_err_pid%p.log"
-export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<%=scope.function_hdp_template_var("hbase_log_dir")%>/gc.log-`date +'%Y%m%d%H%M'`"
-# Uncomment below to enable java garbage collection logging.
-# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
-
-# Uncomment and adjust to enable JMX exporting
-# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
-# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-#
-# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
-export HBASE_MASTER_OPTS="-Xmx<%=scope.function_hdp_template_var("hbase_master_heapsize")%>"
-export HBASE_REGIONSERVER_OPTS="-Xmn<%=scope.function_hdp_template_var("hbase_regionserver_xmn_size")%> -XX:CMSInitiatingOccupancyFraction=70  -Xms<%=scope.function_hdp_template_var("hbase_regionserver_heapsize")%> -Xmx<%=scope.function_hdp_template_var("hbase_regionserver_heapsize")%>"
-# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
-
-# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
-export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
-
-# Extra ssh options. Empty by default.
-# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
-
-# Where log files are stored. $HBASE_HOME/logs by default.
-export HBASE_LOG_DIR=<%=scope.function_hdp_template_var("hbase_log_dir")%>
-
-# A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
-
-# The directory where pid files are stored. /tmp by default.
-export HBASE_PID_DIR=<%=scope.function_hdp_template_var("hbase_pid_dir")%>
-
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
-
-# Tell HBase whether it should manage it's own instance of Zookeeper or not.
-export HBASE_MANAGES_ZK=false
-
-
-<% if scope.function_hdp_template_var("::hdp::params::security_enabled") == true %>
-export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config=<%=scope.function_hdp_template_var("::hdp-hbase::params::hbase_client_jaas_config_file")%>"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config=<%=scope.function_hdp_template_var("::hdp-hbase::params::hbase_master_jaas_config_file")%>"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config=<%=scope.function_hdp_template_var("::hdp-hbase::params::hbase_regionserver_jaas_config_file")%>"
-<% end %>
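
The <% if ... %> block at the end of hbase-env.sh.erb appended a JAAS login config to each daemon's JVM options only in secure mode. A minimal sketch of that conditional in plain Python; the config file path is a placeholder:

    def with_jaas(opts: str, jaas_file: str, security_enabled: bool) -> str:
        # Mirror the ERB conditional: append the JAAS option only when
        # security is enabled.
        if not security_enabled:
            return opts
        return opts + " -Djava.security.auth.login.config=" + jaas_file

    print(with_jaas("-XX:+UseConcMarkSweepGC",
                    "/etc/hbase/conf/hbase_client_jaas.conf", True))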

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-smoke.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-smoke.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-smoke.sh.erb
deleted file mode 100644
index 827717b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-smoke.sh.erb
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-disable 'ambarismoketest'
-drop 'ambarismoketest'
-create 'ambarismoketest','family'
-put 'ambarismoketest','row01','family:col01','<%=scope.function_hdp_template_var("::hdp-hbase::hbase::service_check::serviceCheckData")%>'
-scan 'ambarismoketest'
-exit
\ No newline at end of file
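
The template above is a sequence of HBase shell commands (disable/drop/create/put/scan against the ambarismoketest table). A hypothetical sketch of how a runner might execute the rendered script non-interactively, assuming `hbase shell <file>` accepts a script path and using a placeholder location:

    import subprocess

    def run_hbase_smoke(script_path: str = "/tmp/hbase-smoke.sh") -> int:
        # `hbase shell` runs the given script and exits with its status.
        return subprocess.call(["hbase", "shell", script_path])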

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_client_jaas.conf.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_client_jaas.conf.erb b/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_client_jaas.conf.erb
deleted file mode 100644
index 696718e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_client_jaas.conf.erb
+++ /dev/null
@@ -1,5 +0,0 @@
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_grant_permissions.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_grant_permissions.erb b/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_grant_permissions.erb
deleted file mode 100644
index e8f3d1d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_grant_permissions.erb
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-grant '<%=scope.function_hdp_template_var("::hdp::params::smokeuser")%>', '<%=scope.function_hdp_template_var("::hdp-hbase::params::smokeuser_permissions")%>'
-exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_master_jaas.conf.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_master_jaas.conf.erb b/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_master_jaas.conf.erb
deleted file mode 100644
index 68bf733..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_master_jaas.conf.erb
+++ /dev/null
@@ -1,8 +0,0 @@
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="<%=scope.function_hdp_template_var("::hdp-hbase::params::hbase_master_keytab_path")%>"
-principal="<%=scope.function_hdp_template_var("::hdp-hbase::params::hbase_master_jaas_princ")%>";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_regionserver_jaas.conf.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_regionserver_jaas.conf.erb b/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_regionserver_jaas.conf.erb
deleted file mode 100644
index 87838ca..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_regionserver_jaas.conf.erb
+++ /dev/null
@@ -1,8 +0,0 @@
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="<%=scope.function_hdp_template_var("::hdp-hbase::params::hbase_regionserver_keytab_path")%>"
-principal="<%=scope.function_hdp_template_var("::hdp-hbase::params::hbase_regionserver_jaas_princ")%>";
-};
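
Taken together, the three JAAS templates differ only in credential source: the client entry uses the ticket cache (useKeyTab=false), while the master and regionserver entries use service keytabs. A minimal sketch composing either variant; keytab path and principal are placeholders:

    def jaas_client_entry(keytab: str = None, principal: str = None) -> str:
        # With a keytab we emit the server-style entry; otherwise the
        # ticket-cache client entry, matching the deleted templates.
        if keytab:
            body = ('useKeyTab=true\nstoreKey=true\nuseTicketCache=false\n'
                    'keyTab="%s"\nprincipal="%s";' % (keytab, principal))
        else:
            body = 'useKeyTab=false\nuseTicketCache=true;'
        return ('Client {\n'
                'com.sun.security.auth.module.Krb5LoginModule required\n'
                + body + '\n};')

    print(jaas_client_entry())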

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/regionservers.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/regionservers.erb b/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/regionservers.erb
deleted file mode 100644
index 159a2f6..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/regionservers.erb
+++ /dev/null
@@ -1,3 +0,0 @@
-<%h=scope.function_hdp_host("hbase_rs_hosts"); (h.kind_of?(Array) ? h : []).each do |host|-%>
-<%= host %>
-<%end-%>
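
The regionservers.erb loop emitted one host per line, with a kind_of?(Array) guard so a missing or scalar value produced an empty file rather than an error. The same logic as a minimal Python sketch:

    def regionservers_file(hosts) -> str:
        # Tolerate a non-list value exactly as the ERB guard did.
        hosts = hosts if isinstance(hosts, list) else []
        return "".join(h + "\n" for h in hosts)

    print(regionservers_file(["rs1.example.com", "rs2.example.com"]), end="")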

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hcat/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hcat/files/hcatSmoke.sh b/ambari-agent/src/main/puppet/modules/hdp-hcat/files/hcatSmoke.sh
deleted file mode 100644
index 695d56a..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hcat/files/hcatSmoke.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-
-case "$2" in
-
-prepare)
-  hcat -e "show tables"
-  hcat -e "drop table IF EXISTS ${tablename}"
-  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
-;;
-
-cleanup)
-  hcat -e "drop table IF EXISTS ${tablename}"
-;;
-
-esac
\ No newline at end of file
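
hcatSmoke.sh takes a table name ($1) and a mode ($2): 'prepare' creates the table, 'cleanup' drops it. The service check manifest below invokes it exactly that way; a hypothetical Python caller with an illustrative table name:

    import subprocess

    # Matches the invocation used by the hcat service check:
    #   sh /tmp/hcatSmoke.sh <table> prepare|cleanup
    subprocess.check_call(["sh", "/tmp/hcatSmoke.sh", "hcatsmoke123", "prepare"])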

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hcat/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hcat/files/pigSmoke.sh b/ambari-agent/src/main/puppet/modules/hdp-hcat/files/pigSmoke.sh
deleted file mode 100644
index 2e90ac0..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hcat/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';
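
Despite the .sh suffix, pigSmoke.sh is a Pig Latin script: it loads a ':'-delimited 'passwd' file, projects the first field, and stores the result. For clarity, the equivalent computation as a minimal Python sketch, with file names as in the script:

    # Equivalent of the Pig job above: write the first ':'-separated
    # field of each input line to the output file.
    with open("passwd") as src, open("pigsmoke.out", "w") as out:
        for line in src:
            out.write(line.split(":", 1)[0] + "\n")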

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/hcat/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/hcat/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/hcat/service_check.pp
deleted file mode 100644
index f2aabb0..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/hcat/service_check.pp
+++ /dev/null
@@ -1,73 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat::hcat::service_check() 
-{
-  include hdp-hcat::params
-  $unique = hdp_unique_id_and_date()
-  $smoke_test_user = $hdp::params::smokeuser
-  $output_file = "/apps/hive/warehouse/hcatsmoke${unique}"
-  $security_enabled=$hdp::params::security_enabled
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-
-  if ($security_enabled == true) {
-    $smoke_user_kinitcmd="${hdp::params::kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
-  } else {
-    $smoke_user_kinitcmd=""
-  }
-
-  $test_cmd = "fs -test -e ${output_file}" 
-  
-  anchor { 'hdp-hcat::hcat::service_check::begin':}
-
-  file { '/tmp/hcatSmoke.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-hcat/hcatSmoke.sh",
-    mode => '0755',
-  }
-
-  exec { 'hcatSmoke.sh prepare':
-    command   => "su - ${smoke_test_user} -c '${smoke_user_kinitcmd}sh /tmp/hcatSmoke.sh hcatsmoke${unique} prepare'",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/hcatSmoke.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    notify    => Hdp-hadoop::Exec-hadoop['hcat::service_check::test'],
-    logoutput => "true"
-  }
-
-  hdp-hadoop::exec-hadoop { 'hcat::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Exec['hcatSmoke.sh prepare'],
-  }
-
-  exec { 'hcatSmoke.sh cleanup':
-    command   => "su - ${smoke_test_user} -c '${smoke_user_kinitcmd}sh /tmp/hcatSmoke.sh hcatsmoke${unique} cleanup'",
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    require   => Hdp-hadoop::Exec-hadoop['hcat::service_check::test'],
-    before    => Anchor['hdp-hcat::hcat::service_check::end'],
-    logoutput => "true"
-  }
-  
-  anchor{ 'hdp-hcat::hcat::service_check::end':}
-}
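
The manifest above chains prepare -> HDFS existence test -> cleanup via notify/refreshonly, retrying each step (tries => 3, try_sleep => 5). A hypothetical sketch of the same flow in plain Python; 'ambari-qa' as the smoke user and the warehouse path are assumptions:

    import subprocess
    import time

    def retry(cmd: str, tries: int = 3, sleep: int = 5) -> None:
        # Same semantics as Puppet's tries/try_sleep on an exec.
        for attempt in range(tries):
            if subprocess.call(cmd, shell=True) == 0:
                return
            time.sleep(sleep)
        raise RuntimeError("command failed: " + cmd)

    retry("su - ambari-qa -c 'sh /tmp/hcatSmoke.sh hcatsmoke1 prepare'")
    retry("hadoop fs -test -e /apps/hive/warehouse/hcatsmoke1")
    retry("su - ambari-qa -c 'sh /tmp/hcatSmoke.sh hcatsmoke1 cleanup'")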

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp
deleted file mode 100644
index 1fb0ced..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp-hcat::params
-{
-  $hcat_config_dir = $hdp-hcat::params::hcat_conf_dir
-  $hcat_pid_dir = $hdp-hcat::params::hcat_pid_dir
-
-  if ($hdp::params::use_32_bits_on_slaves == false) {
-    $size = 64
-  } else {
-    $size = 32
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state == 'uninstalled') {
-    hdp::package { 'hcat' :
-      ensure => 'uninstalled', 
-      size   => $size
-    }
-
-    hdp::directory { $hcat_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory { $hcat_pid_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    Hdp::Package['hcat'] -> Hdp::Directory[$hcat_config_dir] -> Hdp::Directory[$hcat_pid_dir]
-
-  } elsif ($service_state == 'installed_and_configured') {
-    hdp::package { 'hcat' : 
-      size => $size
-    }
-
-    hdp::directory { $hcat_config_dir:
-      service_state => $service_state,
-      force => true,
-      owner => $hcat_user,
-      group => $hdp::params::user_group,
-      override_owner => true
-    }
-
-    hdp::directory_recursive_create { $hcat_pid_dir:
-      owner => $webhcat_user,
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp-hcat::configfile { 'hcat-env.sh':}
-  
-    Hdp::Package['hcat'] -> Hdp::Directory[$hcat_config_dir] -> Hdp::Directory_recursive_create[$hcat_pid_dir] -> Hdp-hcat::Configfile<||> 
-
- } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-### config files
-define hdp-hcat::configfile()
-{
-  hdp::configfile { "${hdp::params::hcat_conf_dir}/${name}":
-    component => 'hcat',
-    owner => $hdp::params::hcat_user
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/params.pp
deleted file mode 100644
index b9c2f5e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/params.pp
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat::params() inherits hdp::params
-{
-  $hcat_conf_dir = $hdp::params::hcat_conf_dir
-
-  $hcat_metastore_port = hdp_default("hcat_metastore_port",9933)
-  $hcat_lib = hdp_default("hcat_lib","/usr/lib/hcatalog/share/hcatalog") #TODO: should I remove and just use hcat_dbroot
-
-  ### hcat-env
-  $hcat_dbroot = hdp_default("hcat_dbroot",$hcat_lib)
-
-  $hcat_log_dir = hdp_default("hcat_log_dir","/var/log/hcatalog")
-
-  $hcat_pid_dir = hdp_default("hcat_pid_dir","/var/run/hcatalog")
-
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hcat/templates/hcat-env.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hcat/templates/hcat-env.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-hcat/templates/hcat-env.sh.erb
deleted file mode 100644
index cf46b5c..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hcat/templates/hcat-env.sh.erb
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME=<%=scope.function_hdp_template_var("::hdp::params::java64_home")%>
-HCAT_PID_DIR=<%=scope.function_hdp_template_var("::hcat_pid_dir")%>/
-HCAT_LOG_DIR=<%=scope.function_hdp_template_var("::hcat_log_dir")%>/
-HCAT_CONF_DIR=<%=scope.function_hdp_template_var("hcat_conf_dir")%>
-HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp::params::hadoop_home")%>}
-#DBROOT is the path where the connector jars are downloaded
-DBROOT=<%=scope.function_hdp_template_var("hcat_dbroot")%>
-USER=<%=scope.function_hdp_user("hcat_user")%>
-METASTORE_PORT=<%=scope.function_hdp_template_var("hcat_metastore_port")%>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveSmoke.sh b/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveSmoke.sh
deleted file mode 100644
index 7e03524..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveSmoke.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
-echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2.sql b/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2.sql
deleted file mode 100644
index 99a3865..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2.sql
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
-DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2Smoke.sh b/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2Smoke.sh
deleted file mode 100644
index 051a21e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2Smoke.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e '!run $2' 2>&1| awk '{print}'|grep Error`
-
-if [ "x$smokeout" == "x" ]; then
-  echo "Smoke test of hiveserver2 passed"
-  exit 0
-else
-  echo "Smoke test of hiveserver2 wasnt passed"
-  echo $smokeout
-  exit 1
-fi
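
The smoke passes when beeline's output contains no 'Error' line; note the script greps combined output rather than checking beeline's exit code. A hypothetical restatement in Python, with the same fake credentials the script uses:

    import subprocess

    def hiveserver2_smoke(url: str, sql_file: str) -> bool:
        cmd = ["/usr/lib/hive/bin/beeline", "-u", url,
               "-n", "fakeuser", "-p", "fakepwd",
               "-d", "org.apache.hive.jdbc.HiveDriver",
               "-e", "!run " + sql_file]
        out = subprocess.run(cmd, capture_output=True, text=True)
        # Pass iff no "Error" appears in the combined output.
        return "Error" not in (out.stdout + out.stderr)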

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/files/startHiveserver2.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/files/startHiveserver2.sh b/ambari-agent/src/main/puppet/modules/hdp-hive/files/startHiveserver2.sh
deleted file mode 100644
index fa90c2f..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/files/startHiveserver2.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=' ' > $1 2> $2 &
-echo $!|cat>$3
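
startHiveserver2.sh's positional arguments are: $1 stdout log, $2 stderr log, $3 pid file, $4 HIVE_CONF_DIR; startMetastore.sh below follows the same convention. A hypothetical Python equivalent of the launcher:

    import os
    import subprocess

    def start_hiveserver2(out_log, err_log, pid_file, conf_dir):
        # $1=stdout log, $2=stderr log, $3=pid file, $4=HIVE_CONF_DIR.
        env = dict(os.environ, HIVE_CONF_DIR=conf_dir)
        with open(out_log, "w") as o, open(err_log, "w") as e:
            proc = subprocess.Popen(
                ["/usr/lib/hive/bin/hiveserver2",
                 "-hiveconf", "hive.metastore.uris= "],
                stdout=o, stderr=e, env=env)
        with open(pid_file, "w") as f:
            f.write("%d\n" % proc.pid)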

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/files/startMetastore.sh b/ambari-agent/src/main/puppet/modules/hdp-hive/files/startMetastore.sh
deleted file mode 100644
index 9350776..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/files/startMetastore.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HIVE_CONF_DIR=$4 hive --service metastore > $1 2> $2 &
-echo $!|cat>$3

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/client.pp b/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/client.pp
deleted file mode 100644
index 9e95e25..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/client.pp
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::client(
-  $service_state = $hdp::params::cluster_client_state,
-  $hive_server_host = undef
-) inherits hdp::params
-{ 
-  if ($service_state == 'no_op') {
-   } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-    if ($hdp::params::service_exists['hdp-hive::server'] != true) {
-      #installs package, creates user, sets configuration
-      class { 'hdp-hive':
-        service_state => $service_state
-      } 
-      if ($hive_server_host != undef) {
-        Hdp-Hive::Configfile<||>{hive_server_host => $hive_server_host}
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/hive/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/hive/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/hive/service_check.pp
deleted file mode 100644
index cf47381..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/hive/service_check.pp
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::hive::service_check() inherits hdp-hive::params
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $smoke_test_sql = "/tmp/$smoke_test_sql_file"
-  $smoke_test_path = "/tmp/$smoke_test_script"
-  $security_enabled = $hdp::params::security_enabled
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-
-  if ($security_enabled == true) {
-    $kinit_cmd = "${hdp::params::kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user};"
-    $hive_principal_ext = "principal=${hdp-hive::params::hive_metatore_keytab_path}"
-    $hive_url_ext = "${hive_url}/\\;${hive_principal_ext}"
-    $smoke_cmd = "${kinit_cmd} env JAVA_HOME=${hdp::params::java64_home} ${smoke_test_path} ${hive_url_ext} ${smoke_test_sql}"
-  } else {
-    $smoke_cmd = "env JAVA_HOME=$hdp::params::java64_home $smoke_test_path $hive_url $smoke_test_sql"
-  }
-
-
-  file { $smoke_test_path:
-    ensure => present,
-    source => "puppet:///modules/hdp-hive/$smoke_test_script",
-    mode => '0755',
-  }
-
-  file { $smoke_test_sql:
-    ensure => present,
-    source => "puppet:///modules/hdp-hive/$smoke_test_sql_file"
-  }
-
-  exec { $smoke_test_path:
-    command   => $smoke_cmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true",
-    user => $smoke_test_user
-  }
-
-#  $unique = hdp_unique_id_and_date()
-#  $output_file = "/apps/hive/warehouse/hivesmoke${unique}"
-#  $test_cmd = "fs -test -e ${output_file}"
-
-#  file { '/tmp/hiveSmoke.sh':
-#    ensure => present,
-#    source => "puppet:///modules/hdp-hive/hiveSmoke.sh",
-#    mode => '0755',
-#  }
-#
-#  exec { '/tmp/hiveSmoke.sh':
-#    command => "su - ${smoke_test_user} -c 'env JAVA_HOME=$hdp::params::java64_home sh /tmp/hiveSmoke.sh hivesmoke${unique}'",
-#    tries => 3,
-#    try_sleep => 5,
-#    path => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-#    notify => Hdp-hadoop::Exec-hadoop['hive::service_check::test'],
-#    logoutput => "true"
-#  }
-
-#  hdp-hadoop::exec-hadoop { 'hive::service_check::test':
-#    command => $test_cmd,
-#    refreshonly => true
-#  }
-
-#  File[$smoke_test_path] -> File[$smoke_test_sql] -> Exec[$smoke_test_path] -> File['/tmp/hiveSmoke.sh'] -> Exec['/tmp/hiveSmoke.sh'] -> Hdp-Hadoop::Exec-Hadoop['hive::service_check::test']
-
-  include hdp-hcat::hcat::service_check  
-
-  File[$smoke_test_path] -> File[$smoke_test_sql] -> Exec[$smoke_test_path]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp
deleted file mode 100644
index d7a58c1..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp
+++ /dev/null
@@ -1,145 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive(
-  $service_state,
-  $server = false
-) 
-{
-  include hdp-hive::params
-  
-  $hive_user = $hdp-hive::params::hive_user
-  if ($server == true) {
-    $hive_config_dir = $hdp-hive::params::hive_server_conf_dir
-    $config_file_mode = '0600'
-  } else {
-    $hive_config_dir = $hdp-hive::params::hive_conf_dir
-    $config_file_mode = '0644'
-  }
-
-  # Configs generation
-  if has_key($configuration, 'hive-site') {
-    configgenerator::configfile{'hive-site':
-      modulespath => $hive_config_dir, 
-      filename => 'hive-site.xml',
-      module => 'hdp-hive',
-      configuration => $configuration['hive-site'],
-      owner => $hive_user,
-      group => $hdp::params::user_group,
-      mode => $config_file_mode
-    }
-  } else {
-    file { "${hive_config_dir}/hive-site.xml":
-      owner => $hive_user,
-      group => $hdp::params::user_group,
-      mode => $config_file_mode
-    }
-  }
-
-  anchor { 'hdp-hive::begin': }
-  anchor { 'hdp-hive::end': }
-
-  if ($service_state == 'installed_and_configured' and ($hive_jdbc_driver == "com.mysql.jdbc.Driver" or $hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver")) {
-    hdp::exec { "download DBConnectorVerification.jar" :
-      command => "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 ${hdp::params::jdk_location}${hdp::params::check_db_connection_jar_name} -o ${hdp::params::check_db_connection_jar_name}'",
-      unless  => "[ -f ${check_db_connection_jar} ]"
-    }
-  }
-
-  if ($service_state == 'uninstalled') {
-    hdp::package { 'hive' : 
-      ensure => 'uninstalled'
-    }
-
-    hdp::directory_recursive_create { $hive_config_dir:
-      service_state => $service_state,
-      ensure => "directory",
-      force => true
-    }
-
-    Anchor['hdp-hive::begin'] -> Hdp::Package['hive'] -> Hdp::Directory_recursive_create[$hive_config_dir] ->  Anchor['hdp-hive::end']
-
-  } else {
-    hdp::package { 'hive' : }
-    if ($server == true ) {
-      class { 'hdp-hive::jdbc-connector': }
-    }
-
-    hdp::directory_recursive_create { $hive_config_dir:
-      service_state => $service_state,
-      force => true,
-      owner => $hive_user,
-      group => $hdp::params::user_group,
-      ensure => "directory",
-      override_owner => true
-    }
-
-    hdp-hive::configfile { 'hive-env.sh': config_dir => $hive_config_dir }
-
-    hdp-hive::ownership { 'ownership': config_dir => $hive_config_dir }
-  
-    Anchor['hdp-hive::begin'] -> Hdp::Package['hive'] -> 
-     Hdp::Directory_recursive_create[$hive_config_dir] -> Hdp-hive::Configfile<||> -> Hdp-hive::Ownership['ownership'] -> Anchor['hdp-hive::end']
-
-     if ($server == true ) {
-       Hdp::Package['hive'] -> Class['hdp-hive::jdbc-connector'] -> Anchor['hdp-hive::end']
-    }
-  }
-}
-
-### config files
-define hdp-hive::configfile(
-  $mode = undef,
-  $hive_server_host = undef,
-  $config_dir = $hdp-hive::params::hive_conf_dir
-) 
-{
-  hdp::configfile { "${config_dir}/${name}":
-    component        => 'hive',
-    owner            => $hdp-hive::params::hive_user,
-    mode             => $mode,
-    hive_server_host => $hive_server_host 
-  }
-}
-
-define hdp-hive::ownership(
-  $config_dir = $hdp-hive::params::hive_conf_dir
-)
-{
-  file { "${config_dir}/hive-default.xml.template":
-    owner => $hdp-hive::params::hive_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${config_dir}/hive-env.sh.template":
-    owner => $hdp-hive::params::hive_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${config_dir}/hive-exec-log4j.properties.template":
-    owner => $hdp-hive::params::hive_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${config_dir}/hive-log4j.properties.template":
-    owner => $hdp-hive::params::hive_user,
-    group => $hdp::params::user_group
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/jdbc-connector.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/jdbc-connector.pp b/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/jdbc-connector.pp
deleted file mode 100644
index 8bd9302..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/jdbc-connector.pp
+++ /dev/null
@@ -1,63 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::jdbc-connector()
-{
-  include hdp-hive::params
-
-  $jdbc_jar_name = $hdp-hive::params::jdbc_jar_name
-  
-  $java_share_dir = "/usr/share/java"
-  $driver_curl_target = "${java_share_dir}/${jdbc_jar_name}"
-
-  $hive_lib = $hdp-hive::params::hive_lib
-  $target = "${hive_lib}/${jdbc_jar_name}"
-  
-  $jdk_location = $hdp::params::jdk_location
-  $driver_curl_source = "${jdk_location}${jdbc_jar_name}"
-
-  anchor { 'hdp-hive::jdbc-connector::begin':}
-
-   hdp::package { 'mysql-connector-java' :
-     require   => Anchor['hdp-hive::jdbc-connector::begin']
-   }
-
-  if ($hive_jdbc_driver == "com.mysql.jdbc.Driver"){
-   hdp::exec { 'hive mkdir -p ${artifact_dir} ;  cp /usr/share/java/${jdbc_jar_name}  ${target}':
-       command => "mkdir -p ${::artifact_dir} ;  cp /usr/share/java/${jdbc_jar_name}  ${target}",
-       unless  => "test -f ${target}",
-       creates => $target,
-       path    => ["/bin","/usr/bin/"],
-       require => Hdp::Package['mysql-connector-java'],
-       notify  =>  Anchor['hdp-hive::jdbc-connector::end'],
-   }
-  } elsif ($hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver") {
-   hdp::exec { 'hive mkdir -p ${artifact_dir} ; curl -kf --retry 10 ${driver_curl_source} -o ${driver_curl_target} &&  cp ${driver_curl_target} ${target}':
-       command => "mkdir -p ${::artifact_dir} ; curl -kf --retry 10 ${driver_curl_source} -o ${driver_curl_target} &&  cp ${driver_curl_target} ${target}",
-       unless  => "test -f ${target}",
-       path    => ["/bin","/usr/bin/"],
-       notify  =>  Anchor['hdp-hive::jdbc-connector::end'],
-     }  
-  }
-
-
-   anchor { 'hdp-hive::jdbc-connector::end':}
-
-}
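
jdbc-connector.pp either copies the distro-installed MySQL connector or curls the Oracle one, guarded by `unless => test -f ${target}`. (The literal ${artifact_dir} in the exec titles stays unexpanded because the titles are single-quoted; only the command attribute interpolates.) A hypothetical sketch of the same staging logic:

    import os
    import shutil
    import subprocess

    def stage_jdbc_driver(driver, target, curl_source, curl_target):
        # Skip if already staged, like the `unless => test -f` guard.
        if os.path.exists(target):
            return
        if driver == "com.mysql.jdbc.Driver":
            shutil.copy("/usr/share/java/mysql-connector-java.jar", target)
        elif driver == "oracle.jdbc.driver.OracleDriver":
            subprocess.check_call(["curl", "-kf", "--retry", "10",
                                   curl_source, "-o", curl_target])
            shutil.copy(curl_target, target)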

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/metastore.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/metastore.pp b/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/metastore.pp
deleted file mode 100644
index 29bfd97..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/metastore.pp
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::metastore(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits  hdp-hive::params
-{ 
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-
-    $hdp::params::service_exists['hdp-hive::server'] = true
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'hive_server_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/hive.service.keytab",
-        keytabfile => 'hive.service.keytab',
-        owner => $hdp-hive::params::hive_user
-      }
-    }
-
-    #installs package, creates user, sets configuration
-    class{ 'hdp-hive' : 
-      service_state => $service_state,
-      server        => true
-    } 
-  
-    Hdp-Hive::Configfile<||>{hive_server_host => $hdp::params::host_address}
-
-    class { 'hdp-hive::service' :
-      ensure => $service_state,
-      service_type => "metastore"
-    }
-  
-    #top level does not need anchors
-    Class['hdp-hive'] -> Class['hdp-hive::service']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/params.pp
deleted file mode 100644
index 65f321e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/params.pp
+++ /dev/null
@@ -1,86 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::params() inherits hdp::params
-{
-
-  #TODO: will move to globals
-  $hive_metastore_user_name = hdp_default("hive-site/javax.jdo.option.ConnectionUserName","dbusername")
-  $hive_metastore_user_passwd = hdp_default("hive-site/javax.jdo.option.ConnectionPassword","dbpassword")
-  $hive_server_conf_dir = hdp_default("hive_server_conf_dir", "/etc/hive/conf.server")
-  $hive_jdbc_connection_url = hdp_default("hive-site/javax.jdo.option.ConnectionURL", "")
-
-  ### users
-  $hive_user = $hdp::params::hive_user
-  
-  ### JDBC driver jar name
-  if ($hive_jdbc_driver == "com.mysql.jdbc.Driver"){
-    $jdbc_jar_name = "mysql-connector-java.jar"
-  } elsif ($hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver") {
-    $jdbc_jar_name = "ojdbc6.jar"  
-  }
-  
-  ### common
-  $hive_metastore_port = hdp_default("hive_metastore_port",9083)
-  $hive_lib = hdp_default("hive_lib","/usr/lib/hive/lib/") #TODO: should I remove and just use hive_dbroot
-  $hive_var_lib = hdp_default("hive_var_lib","/var/lib/hive")  
-  $hive_url = "jdbc:hive2://${hive_server_host}:10000"
-
-  ### hive-env
-  $hive_conf_dir = $hdp::params::hive_conf_dir
-
-  $hive_dbroot = hdp_default("hive_dbroot",$hive_lib)
-
-  $hive_log_dir = hdp_default("hive_log_dir","/var/log/hive")
-
-  $hive_pid_dir = hdp_default("hive_pid_dir","/var/run/hive")
-  $hive_pid = hdp_default("hive_pid","hive-server.pid")
-
-  
-  ### hive-site
-  $hive_database_name = hdp_default("hive-site/hive.database.name","hive")
-
-  if ($hdp::params::security_enabled == true) {
-    $hive_metastore_sasl_enabled = true
-  } else {
-    $hive_metastore_sasl_enabled = false
-  }
-
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
-  $hive_metatore_keytab_path = hdp_default("hive-site/hive.metastore.kerberos.keytab.file","/etc/security/keytabs/hive.service.keytab")
-
-  #TODO: using hive_server_host in hdp::params instead
-  #$hive_metastore_server_host = hdp_default("hive-site/hive.metastore.server.host")
-  
-  ###mysql connector
-  $download_url = $hdp::params::gpl_artifacts_download_url
-  $mysql_connector_url = "${download_url}/mysql-connector-java-5.1.18.zip"
-  $hive_aux_jars_path =  hdp_default("hive_aux_jars_path","/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar")
-
-  ##smoke test
-  $smoke_test_sql_file = 'hiveserver2.sql'
-  $smoke_test_script = 'hiveserver2Smoke.sh'
-
-  ##Starting hiveserver2
-  $start_hiveserver2_script = 'startHiveserver2.sh'
-
-  ##Starting metastore
-  $start_metastore_script = 'startMetastore.sh'
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/server.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/server.pp b/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/server.pp
deleted file mode 100644
index 7f4db1f..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/server.pp
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits  hdp-hive::params
-{ 
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-
-    $hdp::params::service_exists['hdp-hive::server'] = true
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'hive_server_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/hive.service.keytab",
-        keytabfile => 'hive.service.keytab',
-        owner => $hdp-hive::params::hive_user
-      }
-    }
-
-    #installs package, creates user, sets configuration
-    class{ 'hdp-hive' : 
-      service_state => $service_state,
-      server        => true
-    } 
-  
-    Hdp-Hive::Configfile<||>{hive_server_host => $hdp::params::host_address}
-
-    class { 'hdp-hive::service' :
-      ensure => $service_state,
-      service_type => "hiveserver2"
-    }
-  
-    #top level does not need anchors
-    Class['hdp-hive'] -> Class['hdp-hive::service']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}


[09/15] AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/params.pp
deleted file mode 100644
index 039c58d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/params.pp
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-mysql::params() inherits hdp-hive::params
-{
-   $db_name = "$hdp-hive::params::hive_database_name"
-   $db_user = $hdp-hive::params::hive_metastore_user_name
-   $db_pw = $hdp-hive::params::hive_metastore_user_passwd
-   $mysql_user = hdp_default("mysql_user","mysql")
-   $mysql_group = hdp_default("mysql_group","mysql")
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/server.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/server.pp b/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/server.pp
deleted file mode 100644
index 54b480d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/server.pp
+++ /dev/null
@@ -1,141 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-mysql::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits  hdp-mysql::params
-{ 
-  if ($service_state in ['no_op','uninstalled']) {
-   } elsif ($service_state in ['running','stopped','installed_and_configured']) {
-   
-    $db_user = $hdp-mysql::params::db_user
-    $db_pw = hdp_escape_spec_characters($hdp-mysql::params::db_pw)
-    $db_name = $hdp-mysql::params::db_name
-    $host = $hdp::params::hive_mysql_host 
-
-    anchor { 'hdp-mysql::server::begin':}
-
-    hdp::package { 'mysql' :
-      size   => 64,
-      require   => Anchor['hdp-mysql::server::begin']
-    }
-
-
-    if ($hdp::params::hdp_os_type == "suse") {
-      # On Suse, creating symlink from default mysqld pid file to expected /var/run location
-	  
-      hdp::directory_recursive_create {'/var/run/mysqld/':
-        require => Hdp::Package['mysql'],
-        owner => $mysql_user,
-        group => $mysql_group
-      }
-	  
-      file { '/var/run/mysqld/mysqld.pid':
-        ensure => 'link',
-        target => '/var/lib/mysql/mysqld.pid',
-        require => Hdp::Directory_recursive_create['/var/run/mysqld/'],
-      }
-    }
-
-
-    if hdp_is_empty($hdp::params::services_names[mysql]) {
-      hdp_fail("There is no service name for service mysql")
-    }
-    else {
-      $service_name_by_os = $hdp::params::services_names[mysql]
-    }
-
-    if hdp_is_empty($service_name_by_os[$hdp::params::hdp_os_type]) {
-      
-      if hdp_is_empty($service_name_by_os['ALL']) {
-        hdp_fail("There is no service name for service mysql")
-      }
-      else {
-        $service_name = $service_name_by_os['ALL']
-      }
-    }
-    else {
-      $service_name = $service_name_by_os[$hdp::params::hdp_os_type]
-    }
-
-    $mysqld_state = $service_state ? {
-     'running' => 'running',
-     'installed_and_configured' => 'running',
-      default =>  'stopped',
-    }
-
-    if ($mysqld_state == 'running') {
-      $command = "service $service_name start"
-    } else {
-      $command = "service $service_name stop"
-    }
-
-    if ($hdp::params::hdp_os_type == "suse") {
-      exec { $service_name:
-        command => $command,
-        path    => "/usr/local/bin/:/bin/:/sbin/",
-        require => File['/var/run/mysqld/mysqld.pid'],
-        notify  => File['/tmp/addMysqlUser.sh']
-       }
-     } else {
-      exec { $service_name:
-        command => $command,
-        path    => "/usr/local/bin/:/bin/:/sbin/", 
-        require => Hdp::Package['mysql'],
-        notify  => File['/tmp/addMysqlUser.sh']
-       }
-     }
-
-
-    if ($service_state == 'installed_and_configured') {
-
-      file {'/tmp/addMysqlUser.sh':
-        ensure => present,
-        source => "puppet:///modules/hdp-mysql/addMysqlUser.sh",
-        mode => '0755',
-        require => Exec[$service_name],
-        notify => Exec['/tmp/addMysqlUser.sh'],
-      }
-      # We start the DB and add a user
-      exec { '/tmp/addMysqlUser.sh':
-        command   => "bash -x /tmp/addMysqlUser.sh ${service_name} ${db_user} \"${db_pw}\" ${host}",
-        tries     => 3,
-        try_sleep => 5,
-        require   => File['/tmp/addMysqlUser.sh'],
-        path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-        notify   => Anchor['hdp-mysql::server::end'],
-        logoutput => "true",
-      }
-    } else {
-      # Now MySQL is running so we remove the temporary file
-      file {'/tmp/addMysqlUser.sh':
-        ensure => absent,
-        require => Exec[$service_name],
-        notify => Anchor['hdp-mysql::server::end'],
-      }
-    }
-
-    anchor { 'hdp-mysql::server::end':}
-
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_aggregate.php b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_aggregate.php
deleted file mode 100644
index f4063fb..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_aggregate.php
+++ /dev/null
@@ -1,243 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  $options = getopt ("f:s:n:w:c:t:");
-  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $status_file=$options['f'];
-  $status_code=$options['s'];
-  $type=$options['t'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  if ($type == "service" && !array_key_exists('n', $options)) {
-    echo "Service description not provided -n option\n";
-    exit(3);
-  }
-  if ($type == "service") {
-    $service_name=$options['n'];
-    /* echo "DESC: " . $service_name . "\n"; */
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  $counts;
-  if ($type == "service") {
-    $counts=query_alert_count($status_file_content, $service_name, $status_code);
-  } else {
-    $counts=query_host_count($status_file_content, $status_code);
-  }
-
-  if ($counts['total'] == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($counts['actual']/$counts['total'])*100;
-  }
-  if ($percent >= $crit) {
-    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (1);
-  }
-  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-  exit(0);
-
-
-  # Functions
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content, $status_code) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $total_hosts = 0;
-    $hosts = 0;
-    foreach ($matches[0] as $object) {
-      $total_hosts++;
-      if (getParameter($object, "current_state") == $status_code) {
-        $hosts++;
-      }
-    }
-    $hostcounts_object['total'] = $total_hosts;
-    $hostcounts_object['actual'] = $hosts;
-    return $hostcounts_object;
-  }
-
-  /* Query Alert counts */
-  function query_alert_count ($status_file_content, $service_name, $status_code) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $alertcounts_objects = array ();
-    $total_alerts=0;
-    $alerts=0;
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "service_description") == $service_name) {
-        $total_alerts++;
-        if (getParameter($object, "current_state") >= $status_code) {
-          $alerts++;
-        }
-      }
-    }
-    $alertcounts_objects['total'] = $total_alerts;
-    $alertcounts_objects['actual'] = $alerts;
-    return $alertcounts_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "NAMENODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-        $pieces[0] = "HBASE";
-        break;
-      case "SYSTEM":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-
-/* JSON document format */
-/*
-{
-  "programstatus":{
-    "last_command_check":"1327385743"
-  },
-  "hostcounts":{
-    "up_nodes":"",
-    "down_nodes":""
-  },
-  "hoststatus":[
-    {
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_state":"0",
-      "last_hard_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_check":"1327385564",
-      "current_attempt":"1",
-      "last_hard_state_change":"1327362079",
-      "last_time_up":"1327385574",
-      "last_time_down":"0",
-      "last_time_unreachable":"0",
-      "is_flapping":"0",
-      "last_check":"1327385574",
-      "servicestatus":[
-      ]
-    }
-  ],
-  "servicestatus":[
-    {
-      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
-      "service_description":"HDFS Current Load",
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_attempt":"1",
-      "current_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_hard_state_change":"1327362079",
-      "last_time_ok":"1327385479",
-      "last_time_warning":"0",
-      "last_time_unknown":"0",
-      "last_time_critical":"0",
-      "last_check":"1327385574",
-      "is_flapping":"0"
-    }
-  ]
-}
-*/
-
-?>
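
For reference: check_aggregate.php scanned the Nagios status file and alerted once the share of affected hosts or services crossed the -w/-c percentages. A hypothetical invocation (the file path, service description, and counts are illustrative, not taken from this commit):

  php check_aggregate.php -f /var/nagios/status.dat -t service -n "DATANODE::Process down" -s 2 -w 10% -c 30%
  OK: total:<8>, affected:<0>

Exit codes follow the Nagios convention these plugins share: 0 OK, 1 WARNING, 2 CRITICAL, 3 usage error.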

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_cpu.pl b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_cpu.pl
deleted file mode 100644
index a5680f7..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_cpu.pl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/perl -w 
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-use strict;
-use Net::SNMP;
-use Getopt::Long;
-
-# Variable
-my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
-my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
-my $o_host = 	undef;
-my $o_community = undef;
-my $o_warn=	undef;
-my $o_crit=	undef;
-my $o_timeout = 15;
-my $o_port = 161;
-
-sub Usage {
-    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
-}
-
-Getopt::Long::Configure ("bundling");
-GetOptions(
-  'H:s'   => \$o_host,	
-  'C:s'   => \$o_community,	
-  'c:s'   => \$o_crit,        
-  'w:s'   => \$o_warn
-          );
-if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
-  Usage();
-  exit 3;
-}
-$o_warn =~ s/\%//g; 
-$o_crit =~ s/\%//g;
-alarm ($o_timeout);
-$SIG{'ALRM'} = sub {
- print "Unable to contact host: $o_host\n";
- exit 3;
-};
-
-# Connect to host
-my ($session,$error);
-($session, $error) = Net::SNMP->session(
-		-hostname  => $o_host,
-		-community => $o_community,
-		-port      => $o_port,
-		-timeout   => $o_timeout
-	  );
-if (!defined($session)) {
-   printf("Error opening session: %s.\n", $error);
-   exit 3;
-}
-
-my $exit_val=undef;
-my $resultat =  (Net::SNMP->VERSION < 4) ?
-	  $session->get_table($base_proc)
-	: $session->get_table(Baseoid => $base_proc);
-
-if (!defined($resultat)) {
-   printf("ERROR: Description table : %s.\n", $session->error);
-   $session->close;
-   exit 3;
-}
-
-$session->close;
-
-my ($cpu_used,$ncpu)=(0,0);
-foreach my $key ( keys %$resultat) {
-  if ($key =~ /$proc_load/) {
-    $cpu_used += $$resultat{$key};
-    $ncpu++;
-  }
-}
-
-if ($ncpu==0) {
-  print "Can't find CPU usage information : UNKNOWN\n";
-  exit 3;
-}
-
-$cpu_used /= $ncpu;
-
-print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
-printf(" %.1f%%",$cpu_used);
-$exit_val=0;
-
-if ($cpu_used > $o_crit) {
- print " > $o_crit% : CRITICAL\n";
- $exit_val=2;
-} else {
-  if ($cpu_used > $o_warn) {
-   print " > $o_warn% : WARNING\n";
-   $exit_val=1;
-  }
-}
-print " < $o_warn% : OK\n" if ($exit_val eq 0);
-exit $exit_val;
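
For reference: check_cpu.pl averaged the SNMP hrProcessorLoad values (OID 1.3.6.1.2.1.25.3.3.1.2) across all CPUs and compared the result to the -w/-c percentages. A sketch of an invocation; the host name and community string are illustrative:

  ./check_cpu.pl -H dn01.example.com -C public -w 90 -c 95
  2 CPU, average load 12.3% < 90% : OK

Exit status was 0/1/2 for OK/WARNING/CRITICAL, and 3 when the host or its SNMP table was unreachable.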

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_datanode_storage.php b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_datanode_storage.php
deleted file mode 100644
index dee22b4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_datanode_storage.php
+++ /dev/null
@@ -1,100 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks the storage capacity remaining in local datanode storage.
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  $cap_remain = $object['Remaining']; /* Total capacity - any external files created in data directories by non-Hadoop apps */
-  $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }  
-  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
-
-  $out_msg = "Capacity:[" . $cap_total . 
-             "], Remaining Capacity:[" . $cap_remain . 
-             "], percent_full:[" . $percent_full  . "]";
-  
-  if ($percent_full > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent_full > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
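
For reference: this check read Remaining and Capacity from the DataNode FSDatasetState JMX bean and compared the percent used against the thresholds. A hypothetical unsecured, non-SSL invocation (the host and figures are illustrative; 50075 is the usual DataNode web port):

  php check_datanode_storage.php -h dn01.example.com -p 50075 -w 80% -c 90% -s false -e false
  OK: Capacity:[98705965056], Remaining Capacity:[83636224000], percent_full:[15.3]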

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_blocks.php b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_blocks.php
deleted file mode 100644
index 19347b4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_blocks.php
+++ /dev/null
@@ -1,115 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks whether the corrupt or missing block % is > threshold
- * check_jmx -H hostaddress -p port -w 1% -c 1%
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:s:e:k:r:t:u:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $nn_jmx_property=$options['s'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $m_percent = 0;
-    $c_percent = 0;
-    $object = $json_array['beans'][0];
-    $missing_blocks = $object['MissingBlocks'];
-    $corrupt_blocks = $object['CorruptBlocks'];
-    $total_blocks = $object['BlocksTotal'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    if($total_blocks == 0) {
-      $m_percent = 0;
-      $c_percent = 0;
-    } else {
-      $m_percent = ($missing_blocks/$total_blocks)*100;
-      $c_percent = ($corrupt_blocks/$total_blocks)*100;
-      break;
-    }
-  }
-  $out_msg = "corrupt_blocks:<" . $corrupt_blocks .
-             ">, missing_blocks:<" . $missing_blocks .
-             ">, total_blocks:<" . $total_blocks . ">";
-
-  if ($m_percent > $crit || $c_percent > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($m_percent > $warn || $c_percent > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -s <namenode bean name> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
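
For reference: this check read MissingBlocks, CorruptBlocks, and BlocksTotal from a NameNode JMX bean (selected with -s), alerting when either percentage exceeded the thresholds; note that security-enabled is passed with -u here, unlike the sibling plugins. A hypothetical invocation (hosts and bean name are illustrative):

  php check_hdfs_blocks.php -h nn01.example.com,nn02.example.com -p 50070 -w 0% -c 1% -s FSNamesystem -u false -e false
  OK: corrupt_blocks:<0>, missing_blocks:<0>, total_blocks:<123456>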

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_capacity.php b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_capacity.php
deleted file mode 100644
index af72723..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_capacity.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks whether the % HDFS capacity used is >= the warn and critical limits.
- * check_jmx -H hostaddress -p port -w 1 -c 1
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $percent = 0;
-    $object = $json_array['beans'][0];
-    $CapacityUsed = $object['CapacityUsed'];
-    $CapacityRemaining = $object['CapacityRemaining'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    $CapacityTotal = $CapacityUsed + $CapacityRemaining;
-    if($CapacityTotal == 0) {
-      $percent = 0;
-    } else {
-      $percent = ($CapacityUsed/$CapacityTotal)*100;
-      break;
-    }
-  }
-  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
-             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
-
-  if ($percent >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
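
For reference: this check summed CapacityUsed and CapacityRemaining from the FSNamesystemState bean and alerted on the percent used. A hypothetical invocation (host and figures illustrative; 50070 is the usual NameNode web port):

  php check_hdfs_capacity.php -h nn01.example.com -p 50070 -w 80% -c 90% -s false -e false
  OK: DFSUsedGB:<12.4>, DFSTotalGB:<512.0>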

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hive_metastore_status.sh b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hive_metastore_status.sh
deleted file mode 100644
index 640c077..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hive_metastore_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# The URI is of the form thrift://<hostname>:<port>
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
-export JAVA_HOME=$JAVA_HOME
-out=`hcat $HCAT_URL -e "show databases" 2>&1`
-if [[ "$?" -ne 0 ]]; then
-  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
-  exit 2;
-fi
-echo "OK: Hive Metastore status OK";
-exit 0;
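
For reference: the arguments were positional (host, port, JAVA_HOME, security flag, then keytab/principal/kinit path when secured), and the check simply ran "show databases" through hcat against the metastore URI. A hypothetical unsecured invocation; the JAVA_HOME path is illustrative, and 9083 matches the hive_metastore_port default removed above:

  ./check_hive_metastore_status.sh meta01.example.com 9083 /usr/jdk64/jdk1.6.0_31 false
  OK: Hive Metastore status OK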

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hue_status.sh b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hue_status.sh
deleted file mode 100644
index 076d9b3..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hue_status.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-status=`/etc/init.d/hue status 2>&1`
-
-if [[ "$?" -ne 0 ]]; then
-	echo "WARNING: Hue is stopped";
-	exit 1;
-fi
-
-echo "OK: Hue is running";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_mapred_local_dir_used.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_mapred_local_dir_used.sh b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_mapred_local_dir_used.sh
deleted file mode 100644
index 15c85eb..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_mapred_local_dir_used.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-MAPRED_LOCAL_DIRS=$1
-CRITICAL=`echo $2 | cut -d % -f 1`
-IFS=","
-for mapred_dir in $MAPRED_LOCAL_DIRS
-do
-  percent=`df -hl $mapred_dir | awk '{percent=$5;} END{print percent}' | cut -d % -f 1`
-  if [ $percent -ge $CRITICAL ]; then
-    echo "CRITICAL: MapReduce local dir is full."
-    exit 2
-  fi
-done
-echo "OK: MapReduce local dir space is available."
-exit 0
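
For reference: the first argument was a comma-separated list of mapred.local.dir paths and the second a critical df-usage percentage. A hypothetical invocation (paths are illustrative):

  ./check_mapred_local_dir_used.sh /grid/0/mapred,/grid/1/mapred 85%
  OK: MapReduce local dir space is available.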

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_name_dir_status.php b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_name_dir_status.php
deleted file mode 100644
index 186166d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_name_dir_status.php
+++ /dev/null
@@ -1,93 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the namenode, gets the jmx-json document,
- * and checks NameDirStatuses to find any offline (failed) directories
- * check_jmx -H hostaddress -p port -k keytab path -r principal name -t kinit path -s security enabled
- */
- 
-  include "hdp_nagios_init.php";
-
-  $options = getopt("h:p:e:k:r:t:s:");
-  //Check only for mandatory options
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-  
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if ($object['NameDirStatuses'] == "") {
-    echo "WARNING: NameNode directory status not available via ".$protocol."://".$host.":".$port."/jmx url, code " . $info['http_code'] ."\n";
-    exit(1);
-  }
-  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
-  $failed_dir_count = count($NameDirStatuses['failed']);
-  $out_msg = "CRITICAL: Offline NameNode directories: ";
-  if ($failed_dir_count > 0) {
-    foreach ($NameDirStatuses['failed'] as $key => $value) {
-      $out_msg = $out_msg . $key . ":" . $value . ", ";
-    }
-    echo $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: All NameNode directories are active" . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled";
-  }
-?>
\ No newline at end of file
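
For reference: this check parsed the NameNodeInfo bean's NameDirStatuses JSON and went CRITICAL when any name directory appeared under "failed". A hypothetical unsecured invocation (host illustrative):

  php check_name_dir_status.php -h nn01.example.com -p 50070 -s false -e false
  OK: All NameNode directories are active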

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_namenodes_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_namenodes_ha.sh b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_namenodes_ha.sh
deleted file mode 100644
index 50b075a..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_namenodes_ha.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-IFS=',' read -a namenodes <<< "$1"
-port=$2
-totalNN=${#namenodes[@]}
-activeNN=()
-standbyNN=()
-unavailableNN=()
-
-for nn in "${namenodes[@]}"
-do
-  status=$(curl -m 5 -s http://$nn:$port/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem | grep -i "tag.HAState" | grep -o -E "standby|active")
-  if [ "$status" == "active" ]; then
-    activeNN[${#activeNN[*]}]="$nn"
-  elif [ "$status" == "standby" ]; then
-    standbyNN[${#standbyNN[*]}]="$nn"
-  elif [ "$status" == "" ]; then
-    unavailableNN[${#unavailableNN[*]}]="$nn"
-  fi
-done
-
-message=""
-critical=false
-
-if [ ${#activeNN[@]} -gt 1 ]; then
-  critical=true
-  message=$message" Only one NN can have HAState=active;"
-elif [ ${#activeNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Active NN available;"
-elif [ ${#standbyNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Standby NN available;"
-fi
-
-NNstats=" Active<"
-for nn in "${activeNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Standby<"
-for nn in "${standbyNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Unavailable<"
-for nn in "${unavailableNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">"
-
-if [ $critical == false ]; then
-  echo "OK: NameNode HA healthy;"$NNstats
-  exit 0
-fi
-
-echo "CRITICAL:"$message$NNstats
-exit 2
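
For reference: the script polled each NameNode's FSNamesystem bean for tag.HAState and required exactly one active plus at least one standby node. A hypothetical invocation (host names illustrative):

  ./check_namenodes_ha.sh nn01.example.com,nn02.example.com 50070
  OK: NameNode HA healthy; Active<nn01.example.com>, Standby<nn02.example.com>, Unavailable<>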

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_nodemanager_health.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_nodemanager_health.sh b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_nodemanager_health.sh
deleted file mode 100644
index 020b41d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_nodemanager_health.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HOST=$1
-PORT=$2
-NODEMANAGER_URL="http://$HOST:$PORT/ws/v1/node/info"
-SEC_ENABLED=$3
-export PATH="/usr/bin:$PATH"
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$4
-  NAGIOS_USER=$5
-  KINIT_PATH=$6
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-
-RESPONSE=`curl --negotiate -u : -s $NODEMANAGER_URL`
-if [[ "$RESPONSE" == *'"nodeHealthy":true'* ]]; then 
-  echo "OK: NodeManager healthy";
-  exit 0;
-fi
-echo "CRITICAL: NodeManager unhealthy";
-exit 2;
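
For reference: the check hit the NodeManager's /ws/v1/node/info REST endpoint and looked for "nodeHealthy":true in the response. A hypothetical unsecured invocation (host illustrative; 8042 is the usual NodeManager web port):

  ./check_nodemanager_health.sh nm01.example.com 8042 false
  OK: NodeManager healthy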

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_oozie_status.sh b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_oozie_status.sh
deleted file mode 100644
index 820ee99..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_oozie_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# OOZIE_URL is of the form http://<hostname>:<port>/oozie
-HOST=`echo $1 | tr '[:upper:]' '[:lower:]'`
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-OOZIE_URL="http://$HOST:$PORT/oozie"
-export JAVA_HOME=$JAVA_HOME
-out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing Oozie Server status [$out]";
-  exit 2;
-fi
-echo "OK: Oozie Server status [$out]";
-exit 0;
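
For reference: arguments were positional (host, port, JAVA_HOME, security flag, then keytab/principal/kinit path when secured), and the check ran the oozie admin -status CLI. A hypothetical unsecured invocation; the JAVA_HOME path and the bracketed status text are illustrative:

  ./check_oozie_status.sh oozie01.example.com 11000 /usr/jdk64/jdk1.6.0_31 false
  OK: Oozie Server status [System mode: NORMAL]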

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_rpcq_latency.php b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_rpcq_latency.php
deleted file mode 100644
index 463f69b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_rpcq_latency.php
+++ /dev/null
@@ -1,104 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks the RPC wait time in the queue, RpcQueueTime_avg_time
- * check_rpcq_latency -h hostaddress -p port -t ServiceName -w 1 -c 1
- * Warning and Critical values are in seconds
- * Service Name = JobTracker, NameNode, JobHistoryServer
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w'];
-  $crit=$options['c'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  } 
-  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
-  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
-             "> Secs";
-
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
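
For reference: this check read RpcQueueTime_avg_time and RpcProcessingTime_avg_time (in seconds) from the service's RpcActivityForPort bean, alerting only on queue time. A hypothetical invocation (host and timings illustrative):

  php check_rpcq_latency.php -h nn01.example.com -p 50070 -n NameNode -w 1 -c 3 -s false -e false
  OK: RpcQueueTime_avg_time:<0.05> Secs, RpcProcessingTime_avg_time:<0.12> Secs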

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_templeton_status.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_templeton_status.sh b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_templeton_status.sh
deleted file mode 100644
index 7fbc4c4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_templeton_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# out='{"status":"ok","version":"v1"}<status_code:200>'
-HOST=$1
-PORT=$2
-VERSION=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then 
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-regex="^.*\"status\":\"ok\".*<status_code:200>$"
-out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
-if [[ $out =~ $regex ]]; then
-  out=`echo "$out" | sed -e 's/{/[/g' | sed -e 's/}/]/g'` 
-  echo "OK: WebHCat Server status [$out]";
-  exit 0;
-fi
-echo "CRITICAL: Error accessing WebHCat Server, status [$out]";
-exit 2;
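
The shell logic above also ports cleanly. A hedged Python (2.x) sketch that
keeps the kinit step and the curl --negotiate call; the host and port in the
driver are placeholders:

  #!/usr/bin/env python
  # Python (2.x) sketch of the check_templeton_status logic above.
  import re
  import subprocess
  import sys

  def check_templeton(host, port, version="v1", keytab=None, principal=None,
                      kinit_path="/usr/bin/kinit"):
      if keytab and principal:  # security enabled
          if subprocess.call([kinit_path, "-kt", keytab, principal]) != 0:
              print "CRITICAL: Error doing kinit for nagios"
              return 2
      url = "http://%s:%s/templeton/%s/status" % (host, port, version)
      out = subprocess.Popen(["curl", "--negotiate", "-u", ":", "-s", "-w",
                              "<status_code:%{http_code}>", url],
                             stdout=subprocess.PIPE).communicate()[0]
      if re.match(r'^.*"status":"ok".*<status_code:200>$', out):
          print "OK: WebHCat Server status [%s]" % out
          return 0
      print "CRITICAL: Error accessing WebHCat Server, status [%s]" % out
      return 2

  if __name__ == "__main__":
      sys.exit(check_templeton("webhcat.example.com", 50111))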

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_webui.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_webui.sh b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_webui.sh
deleted file mode 100644
index b23045e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_webui.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-checkurl () {
-  url=$1
-  curl $url -o /dev/null
-  echo $?
-}
-
-service=$1
-host=$2
-port=$3
-
-if [[ -z "$service" || -z "$host" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
-  exit 3;
-fi
-
-case "$service" in
-
-jobtracker) 
-    jtweburl="http://$host:$port"
-    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
-      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
-      exit 1;
-    fi
-    ;;
-namenode)
-    nnweburl="http://$host:$port"
-    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
-      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
-      exit 1;
-    fi
-    ;;
-jobhistory)
-    jhweburl="http://$host:$port/jobhistoryhome.jsp"
-    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $jhweburl";
-      exit 1;
-    fi
-    ;;
-hbase)
-    hbaseweburl="http://$host:$port/master-status"
-    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
-      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
-      exit 1;
-    fi
-    ;;
-resourcemanager)
-    rmweburl="http://$host:$port/cluster"
-    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
-      echo "WARNING: ResourceManager Web UI not accessible : $rmweburl";
-      exit 1;
-    fi
-    ;;
-historyserver2)
-    hsweburl="http://$host:$port/jobhistory"
-    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $hsweburl";
-      exit 1;
-    fi
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode|resourcemanager|historyserver2]"
-   exit 3
-   ;;
-esac
-
-echo "OK: Successfully accessed $service Web UI"
-exit 0;
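
The case statement above maps each service to a UI path; in Python that
becomes a lookup table. A sketch, where the paths match the script and the
host/port in the driver are illustrative:

  #!/usr/bin/env python
  # Python (2.x) sketch of the check_webui logic above.
  import sys
  import urllib2

  UI_PATHS = {
      "jobtracker": "/",
      "namenode": "/",
      "jobhistory": "/jobhistoryhome.jsp",
      "hbase": "/master-status",
      "resourcemanager": "/cluster",
      "historyserver2": "/jobhistory",
  }

  def check_webui(service, host, port):
      path = UI_PATHS.get(service)
      if path is None:
          print "UNKNOWN: Invalid service name [%s], valid options [%s]" % (
              service, "|".join(sorted(UI_PATHS)))
          return 3
      url = "http://%s:%s%s" % (host, port, path)
      try:
          urllib2.urlopen(url).read()
      except Exception:
          print "WARNING: %s Web UI not accessible : %s" % (service, url)
          return 1
      print "OK: Successfully accessed %s Web UI" % service
      return 0

  if __name__ == "__main__":
      sys.exit(check_webui("namenode", "nn.example.com", 50070))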

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/files/hdp_nagios_init.php
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/hdp_nagios_init.php b/ambari-agent/src/main/puppet/modules/hdp-nagios/files/hdp_nagios_init.php
deleted file mode 100644
index 487eb43..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/files/hdp_nagios_init.php
+++ /dev/null
@@ -1,81 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Common functions called from other alerts
- *
- */
- 
- /*
- * Function for kinit. If security is enabled and klist shows no cached
- * ticket for this principal, makes the kinit call.
- */
-  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
-    if($security_enabled === 'true') {
-    
-      $is_logined = is_logined($principal_name);
-      
-      if (!$is_logined)
-        $status = kinit($kinit_path_local, $keytab_path, $principal_name);
-      else
-        $status = array(0, '');
-    } else {
-      $status = array(0, '');
-    }
-  
-    return $status;
-  }
-  
-  
-  /*
-  * Checks whether the user already holds a Kerberos ticket for the principal
-  */
-  function is_logined($principal_name) {
-    $check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
-    $check_output =  shell_exec($check_cmd);
-    
-    if ($check_output)
-      return false;
-    else
-      return true;
-  }
-
-  /*
-  * Runs kinit command.
-  */
-  function kinit($kinit_path_local, $keytab_path, $principal_name) {
-    $init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
-    $kinit_output = shell_exec($init_cmd);
-    if ($kinit_output) 
-      $status = array(1, $kinit_output);
-    else
-      $status = array(0, '');
-      
-    return $status;
-  }
-
-  function logout() {
-    if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
-      $status = true;
-    else
-      $status = false;
-      
-    return $status;
-  }
- 
- ?>
\ No newline at end of file
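
A Python sketch of these helpers, with one deliberate change: kinit failure is
detected by the process exit code rather than by non-empty output (the PHP
version's output test is fragile when kinit prints warnings). Paths and names
are illustrative:

  # Python (2.x) sketch of the hdp_nagios_init.php helpers above.
  import subprocess

  def is_logged_in(principal):
      # klist prints the cached principals; empty/absent means no ticket
      out = subprocess.Popen(["klist"], stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE).communicate()[0]
      return principal in out

  def kinit(kinit_path, keytab, principal):
      proc = subprocess.Popen([kinit_path, "-kt", keytab, principal],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT)
      out = proc.communicate()[0]
      return (1, out) if proc.returncode != 0 else (0, "")

  def kinit_if_needed(security_enabled, kinit_path, keytab, principal):
      if security_enabled != "true" or is_logged_in(principal):
          return (0, "")
      return kinit(kinit_path, keytab, principal)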

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_all_hosts.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_all_hosts.rb b/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_all_hosts.rb
deleted file mode 100644
index 658c2ae..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_all_hosts.rb
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_nagios_all_hosts, :type => :rvalue) do 
-    hg_defs = function_hdp_template_var("hostgroup_defs")
-    ret = Array.new
-    if hg_defs.kind_of?(Hash)
-      hg_defs.each_value do |info|
-        h = function_hdp_host(info['host_member_info'])
-        unless function_hdp_is_empty(h)
-          ret += [h].flatten 
-        end
-      end
-    end
-    ret.uniq
-  end
-end
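
A Python sketch of the same hostgroup flattening; resolve_hosts below stands
in for function_hdp_host and is an assumption of this sketch, since that
helper lives elsewhere in the hdp module:

  # Flatten every hostgroup's member hosts into one de-duplicated list.
  def nagios_all_hosts(hostgroup_defs, resolve_hosts):
      ret = []
      if isinstance(hostgroup_defs, dict):
          for info in hostgroup_defs.values():
              hosts = resolve_hosts(info["host_member_info"])
              if hosts:
                  ret.extend(hosts if isinstance(hosts, list) else [hosts])
      seen = set()
      # de-duplicate while keeping order, like Ruby's Array#uniq
      return [h for h in ret if h not in seen and not seen.add(h)]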

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_compute_target_hosts.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_compute_target_hosts.rb b/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_compute_target_hosts.rb
deleted file mode 100644
index 3a81d62..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_compute_target_hosts.rb
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_nagios_compute_target_hosts, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    monitored_hosts = args[0]
-    component_name_mapping = args[1]
-    ret = Hash.new
-    monitored_hosts.each do |host_info|
-      hostname = host_info.keys.first
-      cmps = host_info.values.first
-      cmps.each do |cmp|
-        next unless host_var_info = component_name_mapping[cmp]
-        host_var = host_var_info['host_var']
-        if host_var_info['type'] == 'array'
-          (ret[host_var] ||= Array.new) << hostname
-        elsif host_var_info['type'] == 'scalar'
-          ret[host_var] = hostname
-        end
-      end
-    end
-    ret
-  end
-end
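
The array/scalar dispatch above is the interesting part of this function. A
Python sketch with made-up sample data:

  # Fold {hostname: [components]} entries into per-variable host assignments.
  def compute_target_hosts(monitored_hosts, component_name_mapping):
      ret = {}
      for host_info in monitored_hosts:
          hostname, components = list(host_info.items())[0]
          for comp in components:
              info = component_name_mapping.get(comp)
              if not info:
                  continue
              if info["type"] == "array":
                  ret.setdefault(info["host_var"], []).append(hostname)
              elif info["type"] == "scalar":
                  ret[info["host_var"]] = hostname
      return ret

  print compute_target_hosts(
      [{"h1": ["DATANODE"]}, {"h2": ["DATANODE", "NAMENODE"]}],
      {"DATANODE": {"host_var": "slave_hosts", "type": "array"},
       "NAMENODE": {"host_var": "namenode_host", "type": "scalar"}})
  # e.g. {'slave_hosts': ['h1', 'h2'], 'namenode_host': 'h2'}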

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_members_exist.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_members_exist.rb b/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_members_exist.rb
deleted file mode 100644
index 58fd0c2..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_members_exist.rb
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_nagios_members_exist, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    host_type = args[0]
-    hg_defs = function_hdp_template_var("hostgroup_defs")
-    if  hg_defs.kind_of?(Hash)
-      #TODO: see if needed    Puppet::Parser::Functions.autoloader.loadall
-      member_info = (hg_defs[host_type]||{})['host_member_info']
-      member_info and not function_hdp_is_empty(function_hdp_host(member_info))
-    else
-      nil
-    end
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_target_hosts.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_target_hosts.rb b/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_target_hosts.rb
deleted file mode 100644
index 5d777af..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_target_hosts.rb
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_nagios_target_hosts, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    host_types = args[0]
-#TODO: see if needed       Puppet::Parser::Functions.autoloader.loadall
-    host_types.map{|t|function_hdp_host(t)}.map{|h|function_hdp_is_empty(h) ? [] : [h].flatten}.flatten
-  end
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/init.pp
deleted file mode 100644
index 313ea90..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/init.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios(){}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/nagios/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/nagios/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/nagios/service_check.pp
deleted file mode 100644
index c3b57c3..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/nagios/service_check.pp
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios::nagios::service_check() 
-{
-  
-  anchor { 'hdp-nagios::nagios::service_check::begin':}
-
-  exec { 'nagios':
-    command   => "/etc/init.d/nagios status | grep 'is running'",
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    before      => Anchor['hdp-nagios::nagios::service_check::end'],
-    logoutput => "true"
-  }
-
-  anchor{ 'hdp-nagios::nagios::service_check::end':}
-}
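
The exec's tries/try_sleep pair above is a simple bounded retry. A Python
(2.x) sketch of the same pattern, running the same status command:

  import subprocess
  import time

  def nagios_is_running(tries=3, try_sleep=5):
      for attempt in range(tries):
          proc = subprocess.Popen("/etc/init.d/nagios status", shell=True,
                                  stdout=subprocess.PIPE)
          out = proc.communicate()[0]
          if "is running" in out:
              return True
          if attempt < tries - 1:
              time.sleep(try_sleep)
      return False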

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/params.pp
deleted file mode 100644
index 0d811ba..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/params.pp
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios::params() inherits hdp::params
-{   
-  $conf_dir = hdp_default("nagios_conf_dir","/etc/nagios")
-
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-    $nn_metrics_property = "FSNamesystem"
-  } else {
-    $nn_metrics_property = "FSNamesystemMetrics"
-  }
-
-  if hdp_is_empty($hdp::params::services_names[httpd]) {
-    hdp_fail("There is no service name for service httpd")
-  } else {
-    $service_name_by_os = $hdp::params::services_names[httpd]
-  }
-
-  if hdp_is_empty($service_name_by_os[$hdp::params::hdp_os_type]) {
-    if hdp_is_empty($service_name_by_os['ALL']) {
-      hdp_fail("There is no service name for service httpd")
-    } else {
-      $service_name = $service_name_by_os['ALL']
-    }
-  } else {
-    $service_name = $service_name_by_os[$hdp::params::hdp_os_type]
-  }
-
-  $httpd_conf_file = "/etc/${service_name}/conf.d/nagios.conf"
-
-  $plugins_dir = "/usr/lib64/nagios/plugins"
-  $eventhandlers_dir = "/usr/lib/nagios/eventhandlers"  # Does not exist yet
-  $nagios_pid_dir = "/var/run/nagios"
-  $nagios_pid_file = "${nagios_pid_dir}/nagios.pid"
-  $nagios_log_dir = '/var/log/nagios'
-  $nagios_log_archives_dir = "${nagios_log_dir}/archives"
-  
-
-  $nagios_obj_dir = hdp_default("nagios_obj_dir","/etc/nagios/objects")
-  $nagios_var_dir = hdp_default("nagios_var_dir","/var/nagios")
-  $nagios_rw_dir = hdp_default("nagios_rw_dir","/var/nagios/rw")
-  $nagios_host_cfg = hdp_default("nagios_host_cfg","${nagios_obj_dir}/hadoop-hosts.cfg")
-  $nagios_hostgroup_cfg = hdp_default("nagios_hostgroup_cfg","${nagios_obj_dir}/hadoop-hostgroups.cfg")
-  $nagios_servicegroup_cfg = hdp_default("nagios_servicegroup_cfg","${nagios_obj_dir}/hadoop-servicegroups.cfg")
-  $nagios_service_cfg = hdp_default("nagios_service_cfg","${nagios_obj_dir}/hadoop-services.cfg")
-  $nagios_command_cfg = hdp_default("nagios_command_cfg","${nagios_obj_dir}/hadoop-commands.cfg")
-  $nagios_resource_cfg = hdp_default("nagios_resource_cfg","${conf_dir}/resource.cfg")
-
-  $nagios_web_login = hdp_default("nagios_web_login","nagiosadmin")
-  $nagios_web_password = hdp_default("nagios_web_password","admin")
-  
-  $dfs_data_dir = $hdp::params::dfs_data_dir
-
-  $check_result_path = hdp_default("nagios_check_result_path","/var/nagios/spool/checkresults")
-   
-  $nagios_contact = hdp_default("nagios/nagios-contacts/nagios_contact","monitor\\@monitor.com")
-
-  $hostgroup_defs = {
-    namenode => {host_member_info => 'namenode_host'},
-    snamenode => {host_member_info => 'snamenode_host'},
-    slaves => {host_member_info => 'slave_hosts'},
-    tasktracker-servers => {host_member_info => 'mapred_tt_hosts'},
-    agent-servers => {host_member_info => 'all_hosts'},
-    nagios-server => {host_member_info => 'nagios_server_host'},
-    jobtracker  => {host_member_info => 'jtnode_host'},
-    ganglia-server => {host_member_info => 'ganglia_server_host'},
-    flume-servers => {host_member_info => 'flume_hosts'},
-    zookeeper-servers => {host_member_info => 'zookeeper_hosts'},
-    hbasemasters => {host_member_info => 'hbase_master_hosts'},
-    hiveserver => {host_member_info => 'hive_server_host'},
-    region-servers => {host_member_info => 'hbase_rs_hosts'},
-    oozie-server => {host_member_info => 'oozie_server'},
-    webhcat-server => {host_member_info => 'webhcat_server_host'},
-    hue-server => {host_member_info => 'hue_server_host'},
-    resourcemanager => {host_member_info => 'rm_host'},
-    nodemanagers => {host_member_info => 'nm_hosts'},
-    historyserver2 => {host_member_info => 'hs_host'},
-    journalnodes => {host_member_info => 'journalnode_hosts'}
-  }
-}
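
The httpd service-name resolution above (per-OS value, then an 'ALL'
fallback, else fail) is a small lookup routine. A Python sketch; the table
contents are illustrative, not the real $hdp::params::services_names data:

  def httpd_service_name(services_names, os_type):
      by_os = services_names.get("httpd")
      if not by_os:
          raise ValueError("There is no service name for service httpd")
      name = by_os.get(os_type) or by_os.get("ALL")
      if not name:
          raise ValueError("There is no service name for service httpd")
      return name

  print httpd_service_name({"httpd": {"suse": "apache2", "ALL": "httpd"}},
                           "centos6")   # -> httpd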

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp b/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp
deleted file mode 100644
index b409ab8..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp
+++ /dev/null
@@ -1,298 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios::server(
-  $service_state = $hdp::params::cluster_service_state
-) inherits hdp-nagios::params
-{
-  $nagios_var_dir = $hdp-nagios::params::nagios_var_dir
-  $nagios_rw_dir = $hdp-nagios::params::nagios_rw_dir
-  $nagios_config_dir = $hdp-nagios::params::conf_dir
-  $plugins_dir = $hdp-nagios::params::plugins_dir
-  $nagios_obj_dir = $hdp-nagios::params::nagios_obj_dir
-  $check_result_path = $hdp-nagios::params::check_result_path
-  $nagios_httpd_config_file = $hdp-nagios::params::httpd_conf_file
-  $pid_file = $hdp-nagios::params::nagios_pid_file
-
-  if hdp_is_empty($hdp::params::pathes[nagios_p1_pl]) {
-    hdp_fail("There is no path to p1.pl file for nagios")
-  }
-  else {
-    $nagios_p1_pl_by_os = $hdp::params::pathes[nagios_p1_pl]
-  }
-
-  if hdp_is_empty($nagios_p1_pl_by_os[$hdp::params::hdp_os_type]) {
-    if hdp_is_empty($nagios_p1_pl_by_os['ALL']) {
-      hdp_fail("There is no path to p1.pl file for nagios")
-    } else {
-      $nagios_p1_pl = $nagios_p1_pl_by_os['ALL']
-    }
-  } else {
-    $nagios_p1_pl = $nagios_p1_pl_by_os[$hdp::params::hdp_os_type]
-  }
-
-
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['uninstalled']) {
-    class { 'hdp-nagios::server::packages' : 
-      service_state => uninstalled
-    }
-
-    hdp::exec { "rm -f /var/nagios/rw/nagios.cmd" :
-      command => "rm -f /var/nagios/rw/nagios.cmd",
-      unless => "test ! -e  /var/nagios/rw/nagios.cmd"
-    }
-
-    hdp::exec { "rm -rf /tmp/hadoop-nagios" :
-      command => "rm -rf /tmp/hadoop-nagios",
-      unless => "test ! -e  /tmp/hadoop-nagios"
-    }
-
-    hdp::directory { $nagios_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory { $plugins_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory { $nagios_obj_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory_recursive_create { $nagios_pid_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory { $nagios_var_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    Class['hdp-nagios::server::packages'] -> Exec['rm -f /var/nagios/rw/nagios.cmd'] -> Hdp::Directory[$nagios_config_dir] -> Hdp::Directory[$plugins_dir] -> Hdp::Directory[$nagios_obj_dir] -> Hdp::Directory_recursive_create[$nagios_pid_dir] -> Hdp::Directory[$nagios_var_dir]
-
-  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
-    class { 'hdp-nagios::server::packages' : service_state => $service_state}
-  
-    file{ $nagios_httpd_config_file :
-      ensure => present,
-      owner => $nagios_user,
-      group => $nagios_group,
-      content => template("hdp-nagios/nagios.conf.erb"),
-      mode   => '0644'
-    }
-
-    hdp::directory { $nagios_config_dir:
-      service_state => $service_state,
-      force => true,
-      owner => $nagios_user,
-      group => $nagios_group
-    }
-
-    hdp::directory { $plugins_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory { $nagios_obj_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory_recursive_create { $nagios_pid_dir:
-      service_state => $service_state,
-      owner => $nagios_user,
-      group => $nagios_group,
-      ensure => "directory",
-      mode => '0755',
-      force => true
-    }
-
-
-    hdp::directory_recursive_create { $nagios_var_dir:
-      service_state => $service_state,
-      force => true,
-      owner => $nagios_user,
-      group => $nagios_group
-    }
-    
-    hdp::directory_recursive_create { $check_result_path:
-      service_state => $service_state,
-      force => true,
-      owner => $nagios_user,
-      group => $nagios_group
-    }
-
-    hdp::directory_recursive_create { $nagios_rw_dir:
-      service_state => $service_state,
-      force => true,
-      owner => $nagios_user,
-      group => $nagios_group
-    }
-    
-    hdp::directory { $nagios_log_dir:
-      service_state => $service_state,
-      force => true,
-      owner => $nagios_user,
-      group => $nagios_group,
-      mode => '0755',
-      override_owner => true
-    }
-    
-    hdp::directory { $nagios_log_archives_dir:
-      service_state => $service_state,
-      force => true,
-      owner => $nagios_user,
-      group => $nagios_group,
-      mode => '0755',
-      override_owner => true
-    }
-
-    if ($service_state in ['installed_and_configured','running']) {
-      $webserver_state = 'restart'
-    } else {
-      # We never stop httpd, so $webserver_state stays unset here
-      #$webserver_state = $service_state
-    }
-
-    class { 'hdp-monitor-webserver': service_state => $webserver_state}
-
-
-    class { 'hdp-nagios::server::config': 
-      notify => Class['hdp-nagios::server::services']
-    }
-
-    class { 'hdp-nagios::server::enable_snmp': }
-
-    class { 'hdp-nagios::server::web_permissions': }
-
-    file { "$nagios_config_dir/command.cfg" :
-      owner => $nagios_user,
-      group => $nagios_group
-    }
-
-    class { 'hdp-nagios::server::services': ensure => $service_state}
-
-    anchor{'hdp-nagios::server::begin':}
-    anchor{'hdp-nagios::server::end':}
-
-    Anchor['hdp-nagios::server::begin'] -> Class['hdp-nagios::server::packages'] -> File[$nagios_httpd_config_file] -> Class['hdp-nagios::server::enable_snmp']->
-    Hdp::Directory[$nagios_config_dir] -> Hdp::Directory[$plugins_dir] -> Hdp::Directory_recursive_create[$nagios_pid_dir] ->
-    Hdp::Directory[$nagios_obj_dir] -> Hdp::Directory_Recursive_Create[$nagios_var_dir] ->
-    Hdp::Directory_Recursive_Create[$check_result_path] -> Hdp::Directory_Recursive_Create[$nagios_rw_dir] ->
-    Hdp::Directory[$nagios_log_dir] -> Hdp::Directory[$nagios_log_archives_dir] ->
-    Class['hdp-nagios::server::config'] -> Class['hdp-nagios::server::web_permissions'] ->
-    File["$nagios_config_dir/command.cfg"] -> Class['hdp-nagios::server::services'] -> Class['hdp-monitor-webserver'] -> Anchor['hdp-nagios::server::end']
-
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-class hdp-nagios::server::web_permissions()
-{
-  $web_login = $hdp-nagios::params::nagios_web_login
-
-
-  if hdp_is_empty($hdp::params::cmds[htpasswd]) {
-    hdp_fail("There is no htpasswd command mapping")
-  }
-  else {
-    $htpasswd_cmd_by_os = $hdp::params::cmds[htpasswd]
-  }
-
-  if hdp_is_empty($htpasswd_cmd_by_os[$hdp::params::hdp_os_type]) {
-    if hdp_is_empty($htpasswd_cmd_by_os['ALL']) {
-      hdp_fail("There is no htpasswd command mapping")
-    }
-    else {
-      $htpasswd_cmd = $htpasswd_cmd_by_os['ALL']
-    }
-  }
-  else {
-    $htpasswd_cmd = $htpasswd_cmd_by_os[$hdp::params::hdp_os_type]
-  }
-
-  $cmd = "$htpasswd_cmd -c -b  /etc/nagios/htpasswd.users ${web_login} ${hdp-nagios::params::nagios_web_password}"
-  $test = "grep ${web_user} /etc/nagios/htpasswd.users"
-  hdp::exec { $cmd :
-    command => $cmd,
-    unless => $test
-  }
-
-  file { "/etc/nagios/htpasswd.users" :
-    owner => $hdp-nagios::params::nagios_user,
-    group => $hdp-nagios::params::nagios_group,
-    mode  => '0640'
-  }
-
-  if ($hdp::params::hdp_os_type == "suse") {
-    $command = "usermod -G $hdp-nagios::params::nagios_group wwwrun"
-  } else {
-    $command = "usermod -a -G $hdp-nagios::params::nagios_group apache"
-  }
-
-  hdp::exec { "apache_permissions_htpasswd.users" :
-    command => $command  
-  }
-
-  Hdp::Exec[$cmd] -> File["/etc/nagios/htpasswd.users"] -> Hdp::Exec["apache_permissions_htpasswd.users"]
-}
-
-class hdp-nagios::server::services($ensure)
-{
-   $pid_file = $hdp-nagios::params::nagios_pid_file
-  
-   if ($ensure == 'running') {
-     $command = "service nagios start"
-   } elsif ($ensure == 'stopped') {
-     $command = "service nagios stop && rm -f ${pid_file}"
-   }
-
-   if ($ensure in ['running','stopped']) {
-     exec { "nagios":
-       command => $command,
-       path    => "/usr/local/bin/:/bin/:/sbin/",      
-     }
-     anchor{'hdp-nagios::server::services::begin':} ->  Exec['nagios'] ->  anchor{'hdp-nagios::server::services::end':}	
-   }
-}
-
-class hdp-nagios::server::enable_snmp() {
-
-  exec { "enable_snmp":
-    command => "service snmpd start; chkconfig snmpd on",
-    path    => "/usr/local/bin/:/bin/:/sbin/",
-  }
-
-}
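
The web_permissions class above relies on hdp::exec's unless guard for
idempotency: the htpasswd command runs only when grep fails to find the
login. A Python sketch of that guard; the file path matches the manifest,
while the login and password are placeholders:

  import subprocess

  def ensure_htpasswd_user(web_login, web_password,
                           htpasswd_cmd="htpasswd",
                           users_file="/etc/nagios/htpasswd.users"):
      # grep exits non-zero when the login (or the file) is missing;
      # only then do we create/update the entry, mirroring "unless"
      if subprocess.call(["grep", web_login, users_file]) != 0:
          subprocess.check_call([htpasswd_cmd, "-c", "-b", users_file,
                                 web_login, web_password])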