Posted to commits@ambari.apache.org by ma...@apache.org on 2014/01/18 00:40:31 UTC

[35/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startRrdcached.sh
new file mode 100644
index 0000000..e79472b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startRrdcached.sh
@@ -0,0 +1,69 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only attempt to start rrdcached if there's not already one running.
+if [ -z "${rrdcachedRunningPid}" ]
+then
+    # Changed because of a problem Puppet had with the 'nobody' user:
+    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+    #         -b /var/lib/ganglia/rrds -B
+    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+             -b ${RRDCACHED_BASE_DIR} -B"
+
+    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for
+    # this, but it sometimes fails to take effect due to a lack of permissions,
+    # so perform the operation explicitly to be sure.
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
+
+    # Check to make sure rrdcached actually started up.
+    for i in `seq 0 5`; do
+      rrdcachedRunningPid=`getRrdcachedRunningPid`;
+      if [ -n "${rrdcachedRunningPid}" ]
+        then
+          break;
+      fi
+      sleep 1;
+    done
+
+    if [ -n "${rrdcachedRunningPid}" ]
+    then
+        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
+    else
+        echo "Failed to start ${RRDCACHED_BIN}";
+        exit 1;
+    fi
+else
+    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmetad.sh
new file mode 100644
index 0000000..2764e0e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmetad.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${gmetadRunningPid}" ]
+then
+    kill -KILL ${gmetadRunningPid};
+    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
+fi
+
+# Poll again.
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Once we've killed gmetad, there should no longer be a running PID.
+if [ -z "${gmetadRunningPid}" ]
+then
+    # It's safe to stop rrdcached now.
+    ./stopRrdcached.sh;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmond.sh
new file mode 100644
index 0000000..1af3eb9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmond.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function stopGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only go ahead with the termination if we could find a running PID.
+    if [ -n "${gmondRunningPid}" ]
+    then
+      kill -KILL ${gmondRunningPid};
+      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so stop
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        stopGmondForCluster ${gmondClusterName};
+    done
+else
+    stopGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopRrdcached.sh
new file mode 100644
index 0000000..0a0d8d8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopRrdcached.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${rrdcachedRunningPid}" ]
+then
+    kill -TERM ${rrdcachedRunningPid};
+    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
+    # until we're sure it's well and truly dead. 
+    #
+    # Without this, an immediately following startRrdcached.sh won't do
+    # anything, because it still sees this soon-to-die instance alive,
+    # and the net result is that after a few seconds, there's no
+    # ${RRDCACHED_BIN} running on the box anymore.
+    sleep 5;
+    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
+fi 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/teardownGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/teardownGanglia.sh
new file mode 100644
index 0000000..b27f7a2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/teardownGanglia.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh;
+
+# Undo what we did while setting up Ganglia on this box.
+rm -rf ${GANGLIA_CONF_DIR};
+rm -rf ${GANGLIA_RUNTIME_DIR};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia.py
new file mode 100644
index 0000000..1eae6d0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia.py
@@ -0,0 +1,106 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+import os
+
+
+def groups_and_users():
+  import params
+
+  Group(params.user_group)
+  Group(params.gmetad_user)
+  Group(params.gmond_user)
+  User(params.gmond_user,
+       groups=[params.gmond_user])
+  User(params.gmetad_user,
+       groups=[params.gmetad_user])
+
+
+def config():
+  import params
+
+  shell_cmds_dir = params.ganglia_shell_cmds_dir
+  shell_files = ['checkGmond.sh', 'checkRrdcached.sh', 'gmetadLib.sh',
+                 'gmondLib.sh', 'rrdcachedLib.sh',
+                 'setupGanglia.sh', 'startGmetad.sh', 'startGmond.sh',
+                 'startRrdcached.sh', 'stopGmetad.sh',
+                 'stopGmond.sh', 'stopRrdcached.sh', 'teardownGanglia.sh']
+  Directory(shell_cmds_dir,
+            owner="root",
+            group="root",
+            recursive=True
+  )
+  init_file("gmetad")
+  init_file("gmond")
+  for sh_file in shell_files:
+    shell_file(sh_file)
+  for conf_file in ['gangliaClusters.conf', 'gangliaEnv.sh', 'gangliaLib.sh']:
+    ganglia_TemplateConfig(conf_file)
+
+
+def init_file(name):
+  import params
+
+  File("/etc/init.d/hdp-" + name,
+       content=StaticFile(name + ".init"),
+       mode=0755
+  )
+
+
+def shell_file(name):
+  import params
+
+  File(params.ganglia_shell_cmds_dir + os.sep + name,
+       content=StaticFile(name),
+       mode=0755
+  )
+
+
+def ganglia_TemplateConfig(name, mode=0755, tag=None):
+  import params
+
+  TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
+                 owner="root",
+                 group="root",
+                 template_tag=tag,
+                 mode=mode
+  )
+
+
+def generate_daemon(ganglia_service,
+                    name=None,
+                    role=None,
+                    owner=None,
+                    group=None):
+  import params
+
+  cmd = ""
+  if ganglia_service == "gmond":
+    if role == "server":
+      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
+    else:
+      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -o {owner} -g {group}"
+  elif ganglia_service == "gmetad":
+    cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -t -o {owner} -g {group}"
+  else:
+    raise Fail("Unexpected ganglia service")
+  Execute(format(cmd),
+          path=[params.ganglia_shell_cmds_dir, "/usr/sbin",
+                "/sbin:/usr/local/bin", "/bin", "/usr/bin"]
+  )
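
For illustration, the setupGanglia.sh command lines that generate_daemon() builds can be sketched in plain Python as below. This is a minimal sketch, not part of the patch; the owner/group values (e.g. "hadoop") are hypothetical and are supplied by params.py at runtime.

# Illustrative sketch: mirrors the string templates used by generate_daemon() above.
GANGLIA_SHELL_CMDS_DIR = "/usr/libexec/hdp/ganglia"  # ganglia_shell_cmds_dir in params.py

def setup_ganglia_cmd(service, name=None, role=None, owner=None, group=None):
    # gmond gets a cluster name; the "-m" flag marks the server-side (master) gmond.
    if service == "gmond":
        master_flag = "-m " if role == "server" else ""
        return "%s/setupGanglia.sh -c %s %s-o %s -g %s" % (
            GANGLIA_SHELL_CMDS_DIR, name, master_flag, owner, group)
    elif service == "gmetad":
        return "%s/setupGanglia.sh -t -o %s -g %s" % (
            GANGLIA_SHELL_CMDS_DIR, owner, group)
    raise ValueError("Unexpected ganglia service")

print(setup_ganglia_cmd("gmond", name="HDPNameNode", role="server",
                        owner="root", group="hadoop"))
# -> /usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m -o root -g hadoop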

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py
new file mode 100644
index 0000000..bddecf6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py
@@ -0,0 +1,163 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+from os import path
+from resource_management import *
+from ganglia import generate_daemon
+import ganglia
+import ganglia_monitor_service
+
+
+class GangliaMonitor(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def start(self, env):
+    ganglia_monitor_service.monitor("start")
+
+  def stop(self, env):
+    ganglia_monitor_service.monitor("stop")
+
+
+  def status(self, env):
+    import status_params
+    pid_file_name = 'gmond.pid'
+    pid_file_count = 0
+    pid_dir = status_params.pid_dir
+    # Recursively check all existing gmond pid files
+    for cur_dir, subdirs, files in os.walk(pid_dir):
+      for file_name in files:
+        if file_name == pid_file_name:
+          pid_file = os.path.join(cur_dir, file_name)
+          check_process_status(pid_file)
+          pid_file_count += 1
+    if pid_file_count == 0: # If no pid file is present
+      raise ComponentIsNotRunning()
+
+
+  def config(self, env):
+    import params
+
+    ganglia.groups_and_users()
+
+    Directory(params.ganglia_conf_dir,
+              owner="root",
+              group=params.user_group,
+              recursive=True
+    )
+
+    ganglia.config()
+
+    if params.is_namenode_master:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_jtnode_master:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_rmnode_master:
+      generate_daemon("gmond",
+                      name = "HDPResourceManager",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hsnode_master:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hbase_master:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_slave:
+      generate_daemon("gmond",
+                      name = "HDPDataNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_tasktracker:
+      generate_daemon("gmond",
+                      name = "HDPTaskTracker",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hbase_rs:
+      generate_daemon("gmond",
+                      name = "HDPHBaseRegionServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_flume:
+      generate_daemon("gmond",
+                      name = "HDPFlumeServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+
+    Directory(path.join(params.ganglia_dir, "conf.d"),
+              owner="root",
+              group=params.user_group
+    )
+
+    File(path.join(params.ganglia_dir, "conf.d/modgstatus.conf"),
+         owner="root",
+         group=params.user_group
+    )
+    File(path.join(params.ganglia_dir, "conf.d/multicpu.conf"),
+         owner="root",
+         group=params.user_group
+    )
+    File(path.join(params.ganglia_dir, "gmond.conf"),
+         owner="root",
+         group=params.user_group
+    )
+
+
+if __name__ == "__main__":
+  GangliaMonitor().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor_service.py
new file mode 100644
index 0000000..d86d894
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor_service.py
@@ -0,0 +1,31 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+
+def monitor(action=None):  # 'start' or 'stop'
+  if action == "start":
+    Execute("chkconfig gmond off",
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    )
+  Execute(
+    format(
+      "service hdp-gmond {action} >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"),
+    path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server.py
new file mode 100644
index 0000000..e391562
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server.py
@@ -0,0 +1,181 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+from os import path
+from resource_management import *
+from ganglia import generate_daemon
+import ganglia
+import ganglia_server_service
+
+
+class GangliaServer(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    ganglia_server_service.server("start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    ganglia_server_service.server("stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/gmetad.pid")
+    # Check the gmetad pid file
+    check_process_status(pid_file)
+
+  def config(self, env):
+    import params
+
+    ganglia.groups_and_users()
+    ganglia.config()
+
+    if params.has_namenodes:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_jobtracker:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_masters:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_resourcemanager:
+      generate_daemon("gmond",
+                      name = "HDPResourceManager",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+    if params.has_historyserver:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_slaves:
+      generate_daemon("gmond",
+                      name = "HDPDataNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_tasktracker:
+      generate_daemon("gmond",
+                      name = "HDPTaskTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_rs:
+      generate_daemon("gmond",
+                      name = "HDPHBaseRegionServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_flume:
+      generate_daemon("gmond",
+                      name = "HDPFlumeServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+    generate_daemon("gmetad",
+                    name = "gmetad",
+                    role = "server",
+                    owner = "root",
+                    group = params.user_group)
+
+    change_permission()
+    server_files()
+    File(path.join(params.ganglia_dir, "gmetad.conf"),
+         owner="root",
+         group=params.user_group
+    )
+
+
+def change_permission():
+  import params
+
+  Directory('/var/lib/ganglia/dwoo',
+            mode=0777,
+            owner=params.gmetad_user,
+            recursive=True
+  )
+
+
+def server_files():
+  import params
+
+  rrd_py_path = params.rrd_py_path
+  Directory(rrd_py_path,
+            recursive=True
+  )
+  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
+  File(rrd_py_file_path,
+       content=StaticFile("rrd.py"),
+       mode=0755
+  )
+  rrd_file_owner = params.gmetad_user
+  if params.rrdcached_default_base_dir != params.rrdcached_base_dir:
+    Directory(params.rrdcached_base_dir,
+              owner=rrd_file_owner,
+              group=rrd_file_owner,
+              mode=0755,
+              recursive=True
+    )
+    Directory(params.rrdcached_default_base_dir,
+              action = "delete"
+    )
+    Link(params.rrdcached_default_base_dir,
+         to=params.rrdcached_base_dir
+    )
+  elif rrd_file_owner != 'nobody':
+    Directory(params.rrdcached_default_base_dir,
+              owner=rrd_file_owner,
+              group=rrd_file_owner,
+              recursive=True
+    )
+
+
+if __name__ == "__main__":
+  GangliaServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server_service.py
new file mode 100644
index 0000000..b93e3f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server_service.py
@@ -0,0 +1,27 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+
+def server(action=None):  # 'start' or 'stop'
+  command = "service hdp-gmetad {action} >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
+  Execute(format(command),
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )
+  MonitorWebserver("restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/params.py
new file mode 100644
index 0000000..3700d0a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/params.py
@@ -0,0 +1,74 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+
+config = Script.get_config()
+
+user_group = config['configurations']['global']["user_group"]
+ganglia_conf_dir = config['configurations']['global']["ganglia_conf_dir"]
+ganglia_dir = "/etc/ganglia"
+ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
+ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
+
+gmetad_user = config['configurations']['global']["gmetad_user"]
+gmond_user = config['configurations']['global']["gmond_user"]
+
+webserver_group = "apache"
+rrdcached_default_base_dir = "/var/lib/ganglia/rrds"
+rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
+
+ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
+
+hostname = config["hostname"]
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+rm_host = default("/clusterHostInfo/rm_host", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+# datanodes are marked as slave_hosts
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+tt_hosts = default("/clusterHostInfo/mapred_tt_hosts", [])
+hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", [])
+flume_hosts = default("/clusterHostInfo/flume_hosts", [])
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+is_tasktracker = hostname in tt_hosts
+is_hbase_rs = hostname in hbase_rs_hosts
+is_flume = hostname in flume_hosts
+
+has_namenodes = not len(namenode_host) == 0
+has_jobtracker = not len(jtnode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_historyserver = not len(hs_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_slaves = not len(slave_hosts) == 0
+has_tasktracker = not len(tt_hosts) == 0
+has_hbase_rs = not len(hbase_rs_hosts) == 0
+has_flume = not len(flume_hosts) == 0
+
+if System.get_instance().platform == "suse":
+  rrd_py_path = '/srv/www/cgi-bin'
+else:
+  rrd_py_path = '/var/www/cgi-bin'
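
For reference, params.py reads its values from the command structure the Ambari agent passes to Script.get_config(). A minimal, hypothetical dictionary consistent with the keys accessed above might look like this (illustrative only; real values are cluster-specific):

# Illustrative sketch only: every value below is hypothetical.
config = {
    "hostname": "host1.example.com",
    "configurations": {
        "global": {
            "user_group": "hadoop",
            "ganglia_conf_dir": "/etc/ganglia/hdp",
            "ganglia_runtime_dir": "/var/run/ganglia/hdp",
            "gmetad_user": "nobody",
            "gmond_user": "nobody",
            "rrdcached_base_dir": "/var/lib/ganglia/rrds",
        }
    },
    "clusterHostInfo": {
        "ganglia_server_host": ["host1.example.com"],
        "namenode_host": ["host1.example.com"],
        "slave_hosts": ["host2.example.com", "host3.example.com"],
    },
}

# With this input, is_namenode_master would be True on host1.example.com and
# has_slaves would be True cluster-wide.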

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/status_params.py
new file mode 100644
index 0000000..3ccad2f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/status_params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['global']['ganglia_runtime_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaClusters.conf.j2
new file mode 100644
index 0000000..23588a5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaClusters.conf.j2
@@ -0,0 +1,34 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#########################################################
+### ClusterName           GmondMasterHost   GmondPort ###
+#########################################################
+
+    HDPJournalNode          {{ganglia_server_host}}   8654
+    HDPFlumeServer          {{ganglia_server_host}}   8655
+    HDPHBaseRegionServer    {{ganglia_server_host}}   8656
+    HDPNodeManager          {{ganglia_server_host}}   8657
+    HDPTaskTracker          {{ganglia_server_host}}   8658
+    HDPDataNode             {{ganglia_server_host}}   8659
+    HDPSlaves               {{ganglia_server_host}}   8660
+    HDPNameNode             {{ganglia_server_host}}   8661
+    HDPJobTracker           {{ganglia_server_host}}   8662
+    HDPHBaseMaster          {{ganglia_server_host}}   8663
+    HDPResourceManager      {{ganglia_server_host}}   8664
+    HDPHistoryServer        {{ganglia_server_host}}   8666

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaEnv.sh.j2
new file mode 100644
index 0000000..1ead550
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaEnv.sh.j2
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Unix users and groups for the binaries we start up.
+GMETAD_USER={{gmetad_user}};
+GMOND_USER={{gmond_user}};
+WEBSERVER_GROUP={{webserver_group}};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaLib.sh.j2
new file mode 100644
index 0000000..4b5bdd1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaLib.sh.j2
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+GANGLIA_CONF_DIR={{ganglia_conf_dir}};
+GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
+RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
+
+# This file contains all the info about each Ganglia Cluster in our Grid.
+GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
+
+function createDirectory()
+{
+    directoryPath=${1};
+
+    if [ "x" != "x${directoryPath}" ]
+    then
+        mkdir -p ${directoryPath};
+    fi
+}
+
+function getGangliaClusterInfo()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    else
+        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    fi
+}
+
+function getConfiguredGangliaClusterNames()
+{
+  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
+  # the subdirectory name from each.
+  if [ -e ${GANGLIA_CONF_DIR} ]
+  then  
+    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
+  fi
+}
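
The awk lookup in getGangliaClusterInfo() above reads each non-comment line of gangliaClusters.conf as "ClusterName GmondMasterHost GmondPort" and returns the matching entry (or all entries when no name is given). An equivalent sketch in Python, for illustration only (the host name shown is hypothetical):

# Illustrative sketch only, not part of the stack scripts.
def get_ganglia_cluster_info(cluster_name=None, conf_file="./gangliaClusters.conf"):
    matches = []
    with open(conf_file) as f:
        for line in f:
            fields = line.split()
            if not fields or fields[0].startswith("#"):
                continue  # skip blank lines and comments
            if cluster_name is None or fields[0] == cluster_name:
                matches.append(fields)
    return matches

# get_ganglia_cluster_info("HDPNameNode")
# -> [["HDPNameNode", "host1.example.com", "8661"]]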

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/files/hbaseSmokeVerify.sh
new file mode 100644
index 0000000..39fe6e5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,32 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+conf_dir=$1
+data=$2
+echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+grep -q $data /tmp/hbase_chk_verify
+if [ "$?" -ne 0 ]
+then
+  exit 1
+fi
+
+grep -q '1 row(s)' /tmp/hbase_chk_verify
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/functions.py
new file mode 100644
index 0000000..80b49e6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/functions.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g '1000m')
+  @param xmn_percent: float (e.g 0.2)
+  @param xmn_max: integer (e.g 512)
+  """
+  heapsize = int(re.search('\d+',heapsize_str).group(0))
+  heapsize_unit = re.search('\D+',heapsize_str).group(0)
+  xmn_val = int(math.floor(heapsize*xmn_percent))
+  xmn_val -= xmn_val % 8
+  
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
+
+def get_unique_id_and_date():
+    code, out = checked_call("hostid")
+    id = out.strip()
+    
+    now = datetime.datetime.now()
+    date = now.strftime("%M%d%y")
+
+    return "id{id}_date{date}".format(id=id, date=date)
+  
+def get_kinit_path(pathes_list):
+  """
+  @param pathes_list: list of directories to search for the kinit binary
+  """
+  kinit_path = ""
+  
+  for x in pathes_list:
+    if not x:
+      continue
+    
+    path = os.path.join(x,"kinit")
+
+    if os.path.isfile(path):
+      kinit_path = path
+      break
+    
+  return kinit_path
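
A quick worked example of the helpers above (illustrative only, assuming functions.py is importable; the heap sizes and kinit locations are hypothetical):

from functions import calc_xmn_from_xms, get_kinit_path

# calc_xmn_from_xms('1024m', 0.2, 512):
#   heapsize = 1024, unit = 'm'
#   floor(1024 * 0.2) = 204; rounded down to a multiple of 8 -> 200
#   200 <= 512, so the result is '200m'
assert calc_xmn_from_xms('1024m', 0.2, 512) == '200m'

# calc_xmn_from_xms('8192m', 0.2, 512):
#   floor(8192 * 0.2) = 1638 -> 1632, which exceeds the 512 cap -> '512m'
assert calc_xmn_from_xms('8192m', 0.2, 512) == '512m'

# get_kinit_path returns the first <dir>/kinit that exists, or '' if none does.
print(get_kinit_path(['/usr/bin', '/usr/kerberos/bin']))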

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase.py
new file mode 100644
index 0000000..bd33463
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+def hbase(type=None # 'master' or 'regionserver' or 'client'
+              ):
+  import params
+  
+  Directory( params.conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      recursive = True
+  )
+  
+  XmlConfig( "hbase-site.xml",
+            conf_dir = params.conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
+  XmlConfig( "hdfs-site.xml",
+            conf_dir = params.conf_dir,
+            configurations = params.config['configurations']['hdfs-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+  
+  if 'hbase-policy' in params.config['configurations']:
+    XmlConfig( "hbase-policy.xml",
+      configurations = params.config['configurations']['hbase-policy'],
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else: 
+    File( format("{conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+  
+  hbase_TemplateConfig( 'hbase-env.sh')     
+       
+  hbase_TemplateConfig( params.metric_prop_file_name,
+    tag = 'GANGLIA-MASTER' if type == 'master' else 'GANGLIA-RS'
+  )
+
+  hbase_TemplateConfig( 'regionservers')
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{type}_jaas.conf"))
+  
+  if type != "client":
+    Directory( params.pid_dir,
+      owner = params.hbase_user,
+      recursive = True
+    )
+  
+    Directory ( [params.tmp_dir, params.log_dir],
+      owner = params.hbase_user,
+      recursive = True
+    )    
+
+def hbase_TemplateConfig(name, 
+                         tag=None
+                         ):
+  import params
+
+  TemplateConfig( format("{conf_dir}/{name}"),
+      owner = params.hbase_user,
+      template_tag = tag
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_client.py
new file mode 100644
index 0000000..0f2a1bc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_client.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+
+         
+class HbaseClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    
+    hbase(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+#for tests
+def main():
+  command_type = 'install'
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/'
+  stdoutfile = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stdoutfile]
+  
+  HbaseClient().execute()
+  
+if __name__ == "__main__":
+  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_master.py
new file mode 100644
index 0000000..d94b4b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_master.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+
+         
+class HbaseMaster(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(type='master')
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'master',
+      action = 'start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'master',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-hbase-master.pid")
+    check_process_status(pid_file)
+
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
+  print "Running "+command_type
+  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
+  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/HBASE/package'
+  stroutputf = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stroutputf]
+  
+  HbaseMaster().execute()
+  
+if __name__ == "__main__":
+  HbaseMaster().execute()
+  #main()
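
The status() method above relies on check_process_status() reading the pid file and probing the process. A self-contained approximation of that liveness test (pid_is_running is an illustrative name, not the library helper, and the pid path in the example is illustrative):

import errno
import os

def pid_is_running(pid_file):
    # True when pid_file exists and names a process we can signal.
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
    except (IOError, ValueError):
        return False
    try:
        os.kill(pid, 0)  # signal 0 checks existence without touching the process
    except OSError as e:
        return e.errno == errno.EPERM  # alive, but owned by another user
    return True

# e.g. pid_is_running("/var/run/hbase/hbase-hbase-master.pid")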

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000..2d91e75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_regionserver.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+
+         
+class HbaseRegionServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(type='regionserver')
+      
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'regionserver',
+      action = 'start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-hbase-regionserver.pid")
+    check_process_status(pid_file)
+    
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+    
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "stop"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  HbaseRegionServer().execute()
+  
+if __name__ == "__main__":
+  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_service.py
new file mode 100644
index 0000000..7a1248b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_service.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hbase_service(
+  name,
+  action = 'start'): # 'start' or 'stop' or 'status'
+    
+    import params
+  
+    role = name
+    cmd = format("{daemon_script} --config {conf_dir}")
+    pid_file = format("{pid_dir}/hbase-hbase-{role}.pid")
+    
+    daemon_cmd = None
+    no_op_test = None
+    
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+      no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role} && rm -f {pid_file}")
+
+    if daemon_cmd is not None:
+      Execute ( daemon_cmd,
+        not_if = no_op_test,
+        user = params.hbase_user
+      )
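
The strings that format() interpolates above come straight from params.py. Written out with plain string formatting, the start, stop, and no-op commands for a role look like this (the pid directory value is illustrative; the real one comes from hbase_pid_dir):

DAEMON_SCRIPT = "/usr/lib/hbase/bin/hbase-daemon.sh"
CONF_DIR = "/etc/hbase/conf"
PID_DIR = "/var/run/hbase"  # illustrative

def build_commands(role):
    cmd = "%s --config %s" % (DAEMON_SCRIPT, CONF_DIR)
    pid_file = "%s/hbase-hbase-%s.pid" % (PID_DIR, role)
    return {
        # Execute skips the start when this test succeeds (daemon already up).
        "no_op_test": "ls %s >/dev/null 2>&1 && ps `cat %s` >/dev/null 2>&1"
                      % (pid_file, pid_file),
        "start": "%s start %s" % (cmd, role),
        "stop": "%s stop %s && rm -f %s" % (cmd, role, pid_file),
    }

print(build_commands("master")["start"])
# /usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master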

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/params.py
new file mode 100644
index 0000000..95880cb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/params.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import functions
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+conf_dir = "/etc/hbase/conf"
+daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+
+hbase_user = config['configurations']['global']['hbase_user']
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+user_group = config['configurations']['global']['user_group']
+
+# this is "hadoop-metrics2-hbase.properties" for 2.x stacks
+metric_prop_file_name = "hadoop-metrics.properties" 
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+
+log_dir = config['configurations']['global']['hbase_log_dir']
+master_heapsize = config['configurations']['global']['hbase_master_heapsize']
+
+regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
+regionserver_xmn_size = functions.calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+
+client_jaas_config_file = default('hbase_client_jaas_config_file', format("{conf_dir}/hbase_client_jaas.conf"))
+master_jaas_config_file = default('hbase_master_jaas_config_file', format("{conf_dir}/hbase_master_jaas.conf"))
+regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{conf_dir}/hbase_regionserver_jaas.conf"))
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
+ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
+
+rs_hosts = default('hbase_rs_hosts', config['clusterHostInfo']['slave_hosts']) # if hbase_rs_hosts is not given, region servers are assumed to run on the same nodes as the slaves
+
+smoke_test_user = config['configurations']['global']['smokeuser']
+smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
+service_check_data = get_unique_id_and_date()
+
+if security_enabled:
+  
+  _use_hostname_in_principal = default('instance_name', True)
+  _master_primary_name = config['configurations']['global']['hbase_master_primary_name']
+  _hostname = config['hostname']
+  _kerberos_domain = config['configurations']['global']['kerberos_domain']
+  _master_principal_name = config['configurations']['global']['hbase_master_principal_name']
+  _regionserver_primary_name = config['configurations']['global']['hbase_regionserver_primary_name']
+  
+  if _use_hostname_in_principal:
+    master_jaas_princ = format("{_master_primary_name}/{_hostname}@{_kerberos_domain}")
+    regionserver_jaas_princ = format("{_regionserver_primary_name}/{_hostname}@{_kerberos_domain}")
+  else:
+    master_jaas_princ = format("{_master_principal_name}@{_kerberos_domain}")
+    regionserver_jaas_princ = format("{_regionserver_primary_name}@{_kerberos_domain}")
+    
+master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
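
The security branch above builds the JAAS principals either as primary/host@REALM or as primary@REALM, depending on _use_hostname_in_principal. The same assembly as a standalone helper (make_principal is an illustrative name; the realm and host below are made up):

def make_principal(primary, kerberos_domain, hostname=None):
    # primary/host@REALM when a hostname is used, primary@REALM otherwise.
    if hostname:
        return "%s/%s@%s" % (primary, hostname, kerberos_domain)
    return "%s@%s" % (primary, kerberos_domain)

print(make_principal("hbase", "EXAMPLE.COM", hostname="rs1.example.com"))
# hbase/rs1.example.com@EXAMPLE.COM
print(make_principal("hbase", "EXAMPLE.COM"))
# hbase@EXAMPLE.COM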

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py
new file mode 100644
index 0000000..ff6d0ed
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import functions
+
+
+class HbaseServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    output_file = "/apps/hbase/data/ambarismoketest"
+    test_cmd = format("fs -test -e {output_file}")
+    kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
+    hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
+  
+    File( '/tmp/hbaseSmokeVerify.sh',
+      content = StaticFile("hbaseSmokeVerify.sh"),
+      mode = 0755
+    )
+  
+    File( hbase_servicecheck_file,
+      mode = 0755,
+      content = Template('hbase-smoke.sh.j2')
+    )
+    
+    if params.security_enabled:    
+      hbase_grant_permissions_file = '/tmp/hbase_grant_permissions.sh'
+      hbase_kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
+      grantprivilegecmd = format("{hbase_kinit_cmd} hbase shell {hbase_grant_permissions_file}")
+  
+      File( hbase_grant_permissions_file,
+        owner   = params.hbase_user,
+        group   = params.user_group,
+        mode    = 0644,
+        content = Template('hbase_grant_permissions.j2')
+      )
+      
+      Execute( grantprivilegecmd,
+        user = params.hbase_user,
+      )
+
+    servicecheckcmd = format("{kinit_cmd} hbase --config {conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{kinit_cmd} /tmp/hbaseSmokeVerify.sh {conf_dir} {service_check_data}")
+  
+    Execute( servicecheckcmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+  
+    Execute ( smokeverifycmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+    
+def main():
+  import sys
+  command_type = 'perform'
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  HbaseServiceCheck().execute()
+  
+if __name__ == "__main__":
+  HbaseServiceCheck().execute()
+  
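
The check boils down to: kinit if security is enabled, run the generated hbase shell script as the smoke user, then run the verify script against the same service_check_data value. Stripped of the resource_management wrappers, the shell invocation is roughly the following (a sketch using subprocess; the keytab path and principal in the comment are illustrative, not the configured values):

import subprocess

def run_hbase_shell(script_path, conf_dir="/etc/hbase/conf", kinit_cmd=""):
    # Run an hbase shell script, optionally after a kinit; return the exit code.
    cmd = "%s hbase --config %s shell %s" % (kinit_cmd, conf_dir, script_path)
    return subprocess.call(cmd, shell=True)

# Unsecured cluster:
#   run_hbase_shell("/tmp/hbase-smoke.sh")
# Secured cluster (illustrative keytab and principal):
#   run_hbase_shell("/tmp/hbase-smoke.sh",
#                   kinit_cmd="/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa;")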

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py
new file mode 100644
index 0000000..c9b20ef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['global']['hbase_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
new file mode 100644
index 0000000..1c75d15
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8663
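
The only variable in the template above is ganglia_server_host; everything else is static. Rendering one line of it with the jinja2 package (the host name is made up) shows the substitution the agent performs when it writes the file:

from jinja2 import Template

line = "hbase.servers={{ganglia_server_host}}:8663"
print(Template(line).render(ganglia_server_host="ganglia.example.com"))
# hbase.servers=ganglia.example.com:8663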

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
new file mode 100644
index 0000000..e971e13
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8656

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2
new file mode 100644
index 0000000..1c75d15
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2
new file mode 100644
index 0000000..b8505b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2
@@ -0,0 +1,82 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below is what we set by default. It may only work with the SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
+export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
+{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2
new file mode 100644
index 0000000..61fe62f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2
@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+create 'ambarismoketest','family'
+put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
+scan 'ambarismoketest'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2
new file mode 100644
index 0000000..3b3bb18
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};