You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by ao...@apache.org on 2014/01/31 20:50:45 UTC
[20/51] [partial] AMBARI-4491. Move all the supported versions in
Baikal for stack to python code (remove dependence on puppet). (aonishuk)
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/setupGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/setupGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/setupGanglia.sh
deleted file mode 100644
index 5145b9c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/setupGanglia.sh
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh
-
-function usage()
-{
- cat << END_USAGE
-Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
-
-Options:
- -c <gmondClusterName> The name of the Ganglia Cluster whose gmond configuration we're here to generate.
-
- -m Whether this gmond (if -t is not specified) is the master for its Ganglia
- Cluster. Without this, we generate slave gmond configuration.
-
- -t Whether this is a call to generate gmetad configuration (as opposed to the
- gmond configuration that is generated without this).
- -o <owner> Owner
- -g <group> Group
-END_USAGE
-}
-
-function instantiateGmetadConf()
-{
- # gmetad utility library.
- source ./gmetadLib.sh;
-
- generateGmetadConf > ${GMETAD_CONF_FILE};
-}
-
-function instantiateGmondConf()
-{
- # gmond utility library.
- source ./gmondLib.sh;
-
- gmondClusterName=${1};
-
- if [ "x" != "x${gmondClusterName}" ]
- then
-
- createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
- createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
-
- # Always blindly generate the core gmond config - that goes on every box running gmond.
- generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
-
- isMasterGmond=${2};
-
- # Decide whether we want to add on the master or slave gmond config.
- if [ "0" -eq "${isMasterGmond}" ]
- then
- generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
- else
- generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
- fi
-
- chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
-
- else
- echo "No gmondClusterName passed in, nothing to instantiate";
- fi
-}
-
-# main()
-
-gmondClusterName=;
-isMasterGmond=0;
-configureGmetad=0;
-owner='root';
-group='root';
-
-while getopts ":c:mto:g:" OPTION
-do
- case ${OPTION} in
- c)
- gmondClusterName=${OPTARG};
- ;;
- m)
- isMasterGmond=1;
- ;;
- t)
- configureGmetad=1;
- ;;
- o)
- owner=${OPTARG};
- ;;
- g)
- group=${OPTARG};
- ;;
- ?)
- usage;
- exit 1;
- esac
-done
-
-# Initialization.
-createDirectory ${GANGLIA_CONF_DIR};
-createDirectory ${GANGLIA_RUNTIME_DIR};
-# So rrdcached can drop its PID files in here.
-chmod a+w ${GANGLIA_RUNTIME_DIR};
-chown ${owner}:${group} ${GANGLIA_CONF_DIR};
-
-if [ -n "${gmondClusterName}" ]
-then
-
- # Be forgiving of users who pass in -c along with -t (which always takes precedence).
- if [ "1" -eq "${configureGmetad}" ]
- then
- instantiateGmetadConf;
- else
- instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
- fi
-
-elif [ "1" -eq "${configureGmetad}" ]
-then
- instantiateGmetadConf;
-else
- usage;
- exit 2;
-fi
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmetad.sh
deleted file mode 100644
index ab5102d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmetad.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
-source ./rrdcachedLib.sh;
-
-# Before starting gmetad, start rrdcached.
-./startRrdcached.sh;
-
-if [ $? -eq 0 ]
-then
- gmetadRunningPid=`getGmetadRunningPid`;
-
- # Only attempt to start gmetad if there's not already one running.
- if [ -z "${gmetadRunningPid}" ]
- then
- env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
- ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
-
- for i in `seq 0 5`; do
- gmetadRunningPid=`getGmetadRunningPid`;
- if [ -n "${gmetadRunningPid}" ]
- then
- break;
- fi
- sleep 1;
- done
-
- if [ -n "${gmetadRunningPid}" ]
- then
- echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
- else
- echo "Failed to start ${GMETAD_BIN}";
- exit 1;
- fi
- else
- echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
- fi
-else
- echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
- exit 2;
-fi
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmond.sh
deleted file mode 100644
index 239b62e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmond.sh
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function startGmondForCluster()
-{
- gmondClusterName=${1};
-
- gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
- # Only attempt to start gmond if there's not already one running.
- if [ -z "${gmondRunningPid}" ]
- then
- gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
- if [ -e "${gmondCoreConfFileName}" ]
- then
- gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
-
- ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
-
- for i in `seq 0 5`; do
- gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
- if [ -n "${gmondRunningPid}" ]
- then
- break;
- fi
- sleep 1;
- done
-
- if [ -n "${gmondRunningPid}" ]
- then
- echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
- else
- echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
- exit 1;
- fi
- fi
- else
- echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
- fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
- # No ${gmondClusterName} passed in as command-line arg, so start
- # all the gmonds we know about.
- for gmondClusterName in `getConfiguredGangliaClusterNames`
- do
- startGmondForCluster ${gmondClusterName};
- done
-else
- # Just start the one ${gmondClusterName} that was asked for.
- startGmondForCluster ${gmondClusterName};
-fi
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startRrdcached.sh
deleted file mode 100644
index e79472b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startRrdcached.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only attempt to start rrdcached if there's not already one running.
-if [ -z "${rrdcachedRunningPid}" ]
-then
- #changed because problem puppet had with nobody user
- #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
- # -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
- # -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
- # -b /var/lib/ganglia/rrds -B
- su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
- -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
- -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
- -b ${RRDCACHED_BASE_DIR} -B"
-
- # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for
- # this, but it doesn't take sometimes due to a lack of permissions,
- # so perform the operation explicitly to be super-sure.
- chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
- chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
-
- # Check to make sure rrdcached actually started up.
- for i in `seq 0 5`; do
- rrdcachedRunningPid=`getRrdcachedRunningPid`;
- if [ -n "${rrdcachedRunningPid}" ]
- then
- break;
- fi
- sleep 1;
- done
-
- if [ -n "${rrdcachedRunningPid}" ]
- then
- echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
- else
- echo "Failed to start ${RRDCACHED_BIN}";
- exit 1;
- fi
-else
- echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
-fi
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmetad.sh
deleted file mode 100644
index 2764e0e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmetad.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${gmetadRunningPid}" ]
-then
- kill -KILL ${gmetadRunningPid};
- echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
-fi
-
-# Poll again.
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Once we've killed gmetad, there should no longer be a running PID.
-if [ -z "${gmetadRunningPid}" ]
-then
- # It's safe to stop rrdcached now.
- ./stopRrdcached.sh;
-fi
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmond.sh
deleted file mode 100644
index 1af3eb9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmond.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function stopGmondForCluster()
-{
- gmondClusterName=${1};
-
- gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
- # Only go ahead with the termination if we could find a running PID.
- if [ -n "${gmondRunningPid}" ]
- then
- kill -KILL ${gmondRunningPid};
- echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
- fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
- # No ${gmondClusterName} passed in as command-line arg, so stop
- # all the gmonds we know about.
- for gmondClusterName in `getConfiguredGangliaClusterNames`
- do
- stopGmondForCluster ${gmondClusterName};
- done
-else
- stopGmondForCluster ${gmondClusterName};
-fi
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopRrdcached.sh
deleted file mode 100644
index 0a0d8d8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopRrdcached.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${rrdcachedRunningPid}" ]
-then
- kill -TERM ${rrdcachedRunningPid};
- # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait
- # until we're sure it's well and truly dead.
- #
- # Without this, an immediately following startRrdcached.sh won't do
- # anything, because it still sees this soon-to-die instance alive,
- # and the net result is that after a few seconds, there's no
- # ${RRDCACHED_BIN} running on the box anymore.
- sleep 5;
- echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
-fi
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/teardownGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/teardownGanglia.sh
deleted file mode 100644
index b27f7a2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/teardownGanglia.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh;
-
-# Undo what we did while setting up Ganglia on this box.
-rm -rf ${GANGLIA_CONF_DIR};
-rm -rf ${GANGLIA_RUNTIME_DIR};
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia.py
deleted file mode 100644
index 69fde27..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-import os
-
-
-def groups_and_users():
- import params
-
-def config():
- import params
-
- shell_cmds_dir = params.ganglia_shell_cmds_dir
- shell_files = ['checkGmond.sh', 'checkRrdcached.sh', 'gmetadLib.sh',
- 'gmondLib.sh', 'rrdcachedLib.sh',
- 'setupGanglia.sh', 'startGmetad.sh', 'startGmond.sh',
- 'startRrdcached.sh', 'stopGmetad.sh',
- 'stopGmond.sh', 'stopRrdcached.sh', 'teardownGanglia.sh']
- Directory(shell_cmds_dir,
- owner="root",
- group="root",
- recursive=True
- )
- init_file("gmetad")
- init_file("gmond")
- for sh_file in shell_files:
- shell_file(sh_file)
- for conf_file in ['gangliaClusters.conf', 'gangliaEnv.sh', 'gangliaLib.sh']:
- ganglia_TemplateConfig(conf_file)
-
-
-def init_file(name):
- import params
-
- File("/etc/init.d/hdp-" + name,
- content=StaticFile(name + ".init"),
- mode=0755
- )
-
-
-def shell_file(name):
- import params
-
- File(params.ganglia_shell_cmds_dir + os.sep + name,
- content=StaticFile(name),
- mode=0755
- )
-
-
-def ganglia_TemplateConfig(name, mode=0755, tag=None):
- import params
-
- TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
- owner="root",
- group="root",
- template_tag=tag,
- mode=mode
- )
-
-
-def generate_daemon(ganglia_service,
- name=None,
- role=None,
- owner=None,
- group=None):
- import params
-
- cmd = ""
- if ganglia_service == "gmond":
- if role == "server":
- cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
- else:
- cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -o {owner} -g {group}"
- elif ganglia_service == "gmetad":
- cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -t -o {owner} -g {group}"
- else:
- raise Fail("Unexpected ganglia service")
- Execute(format(cmd),
- path=[params.ganglia_shell_cmds_dir, "/usr/sbin",
- "/sbin:/usr/local/bin", "/bin", "/usr/bin"]
- )
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor.py
deleted file mode 100644
index 4abdb9b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor.py
+++ /dev/null
@@ -1,139 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import ganglia_monitor_service
-
-
-class GangliaMonitor(Script):
- def install(self, env):
- import params
-
- self.install_packages(env)
- env.set_params(params)
- self.configure(env)
-
- def start(self, env):
- ganglia_monitor_service.monitor("start")
-
- def stop(self, env):
- ganglia_monitor_service.monitor("stop")
-
-
- def status(self, env):
- import status_params
- pid_file_name = 'gmond.pid'
- pid_file_count = 0
- pid_dir = status_params.pid_dir
- # Recursively check all existing gmond pid files
- for cur_dir, subdirs, files in os.walk(pid_dir):
- for file_name in files:
- if file_name == pid_file_name:
- pid_file = os.path.join(cur_dir, file_name)
- check_process_status(pid_file)
- pid_file_count += 1
- if pid_file_count == 0: # If no any pid file is present
- raise ComponentIsNotRunning()
-
-
- def configure(self, env):
- import params
-
- ganglia.groups_and_users()
-
- Directory(params.ganglia_conf_dir,
- owner="root",
- group=params.user_group,
- recursive=True
- )
-
- ganglia.config()
-
- if params.is_namenode_master:
- generate_daemon("gmond",
- name = "HDPNameNode",
- role = "monitor",
- owner = "root",
- group = params.user_group)
-
- if params.is_jtnode_master:
- generate_daemon("gmond",
- name = "HDPJobTracker",
- role = "monitor",
- owner = "root",
- group = params.user_group)
-
- if params.is_rmnode_master:
- generate_daemon("gmond",
- name = "HDPResourceManager",
- role = "monitor",
- owner = "root",
- group = params.user_group)
-
- if params.is_hsnode_master:
- generate_daemon("gmond",
- name = "HDPHistoryServer",
- role = "monitor",
- owner = "root",
- group = params.user_group)
-
- if params.is_hbase_master:
- generate_daemon("gmond",
- name = "HDPHBaseMaster",
- role = "monitor",
- owner = "root",
- group = params.user_group)
-
- pure_slave = not (params.is_namenode_master and
- params.is_jtnode_master and
- params.is_rmnode_master and
- params.is_hsnode_master and
- params.is_hbase_master) and params.is_slave
- if pure_slave:
- generate_daemon("gmond",
- name = "HDPSlaves",
- role = "monitor",
- owner = "root",
- group = params.user_group)
-
- Directory(path.join(params.ganglia_dir, "conf.d"),
- owner="root",
- group=params.user_group
- )
-
- File(path.join(params.ganglia_dir, "conf.d/modgstatus.conf"),
- owner="root",
- group=params.user_group
- )
- File(path.join(params.ganglia_dir, "conf.d/multicpu.conf"),
- owner="root",
- group=params.user_group
- )
- File(path.join(params.ganglia_dir, "gmond.conf"),
- owner="root",
- group=params.user_group
- )
-
-
-if __name__ == "__main__":
- GangliaMonitor().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor_service.py
deleted file mode 100644
index d86d894..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor_service.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def monitor(action=None):# 'start' or 'stop'
- if action == "start":
- Execute("chkconfig gmond off",
- path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
- )
- Execute(
- format(
- "service hdp-gmond {action} >> /tmp/gmond.log 2>&1 ; /bin/ps auwx | /bin/grep [g]mond >> /tmp/gmond.log 2>&1"),
- path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
- )
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server.py
deleted file mode 100644
index 863f092..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server.py
+++ /dev/null
@@ -1,197 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import ganglia_server_service
-
-
-class GangliaServer(Script):
- def install(self, env):
- import params
-
- self.install_packages(env)
- env.set_params(params)
- self.configure(env)
-
- def start(self, env):
- import params
-
- env.set_params(params)
- ganglia_server_service.server("start")
-
- def stop(self, env):
- import params
-
- env.set_params(params)
- ganglia_server_service.server("stop")
-
- def status(self, env):
- import status_params
- env.set_params(status_params)
- pid_file = format("{pid_dir}/gmetad.pid")
- # Recursively check all existing gmetad pid files
- check_process_status(pid_file)
-
- def configure(self, env):
- import params
-
- ganglia.groups_and_users()
- ganglia.config()
-
- if params.has_namenodes:
- generate_daemon("gmond",
- name = "HDPNameNode",
- role = "server",
- owner = "root",
- group = params.user_group)
-
- if params.has_jobtracker:
- generate_daemon("gmond",
- name = "HDPJobTracker",
- role = "server",
- owner = "root",
- group = params.user_group)
-
- if params.has_hbase_masters:
- generate_daemon("gmond",
- name = "HDPHBaseMaster",
- role = "server",
- owner = "root",
- group = params.user_group)
-
- if params.has_resourcemanager:
- generate_daemon("gmond",
- name = "HDPResourceManager",
- role = "server",
- owner = "root",
- group = params.user_group)
-
- if params.has_nodemanager:
- generate_daemon("gmond",
- name = "HDPNodeManager",
- role = "server",
- owner = "root",
- group = params.user_group)
-
- if params.has_historyserver:
- generate_daemon("gmond",
- name = "HDPHistoryServer",
- role = "server",
- owner = "root",
- group = params.user_group)
-
- if params.has_slaves:
- generate_daemon("gmond",
- name = "HDPDataNode",
- role = "server",
- owner = "root",
- group = params.user_group)
-
- if params.has_tasktracker:
- generate_daemon("gmond",
- name = "HDPTaskTracker",
- role = "server",
- owner = "root",
- group = params.user_group)
-
- if params.has_hbase_rs:
- generate_daemon("gmond",
- name = "HDPHBaseRegionServer",
- role = "server",
- owner = "root",
- group = params.user_group)
-
- if params.has_flume:
- generate_daemon("gmond",
- name = "HDPFlumeServer",
- role = "server",
- owner = "root",
- group = params.user_group)
-
- if params.has_journalnode:
- generate_daemon("gmond",
- name = "HDPJournalNode",
- role = "server",
- owner = "root",
- group = params.user_group)
-
- generate_daemon("gmetad",
- name = "gmetad",
- role = "server",
- owner = "root",
- group = params.user_group)
-
- change_permission()
- server_files()
- File(path.join(params.ganglia_dir, "gmetad.conf"),
- owner="root",
- group=params.user_group
- )
-
-
-def change_permission():
- import params
-
- Directory('/var/lib/ganglia/dwoo',
- mode=0777,
- owner=params.gmetad_user,
- recursive=True
- )
-
-
-def server_files():
- import params
-
- rrd_py_path = params.rrd_py_path
- Directory(rrd_py_path,
- recursive=True
- )
- rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
- File(rrd_py_file_path,
- content=StaticFile("rrd.py"),
- mode=0755
- )
- rrd_file_owner = params.gmetad_user
- if params.rrdcached_default_base_dir != params.rrdcached_base_dir:
- Directory(params.rrdcached_base_dir,
- owner=rrd_file_owner,
- group=rrd_file_owner,
- mode=0755,
- recursive=True
- )
- Directory(params.rrdcached_default_base_dir,
- action = "delete"
- )
- Link(params.rrdcached_default_base_dir,
- to=params.rrdcached_base_dir
- )
- elif rrd_file_owner != 'nobody':
- Directory(params.rrdcached_default_base_dir,
- owner=rrd_file_owner,
- group=rrd_file_owner,
- recursive=True
- )
-
-
-if __name__ == "__main__":
- GangliaServer().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server_service.py
deleted file mode 100644
index b93e3f8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server_service.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def server(action=None):# 'start' or 'stop'
- command = "service hdp-gmetad {action} >> /tmp/gmetad.log 2>&1 ; /bin/ps auwx | /bin/grep [g]metad >> /tmp/gmetad.log 2>&1"
- Execute(format(command),
- path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
- )
- MonitorWebserver("restart")
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/params.py
deleted file mode 100644
index 601601e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/params.py
+++ /dev/null
@@ -1,80 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-
-config = Script.get_config()
-
-user_group = config['configurations']['global']["user_group"]
-ganglia_conf_dir = "/etc/ganglia/hdp"
-ganglia_dir = "/etc/ganglia"
-ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
-ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
-
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
-
-webserver_group = "apache"
-rrdcached_default_base_dir = "/var/lib/ganglia/rrds"
-rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
-
-ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
-
-hostname = config["hostname"]
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-rm_host = default("/clusterHostInfo/rm_host", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-# datanodes are marked as slave_hosts
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-tt_hosts = default("/clusterHostInfo/mapred_tt_hosts", [])
-nm_hosts = default("/clusterHostInfo/nm_hosts", [])
-hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", [])
-flume_hosts = default("/clusterHostInfo/flume_hosts", [])
-jn_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-is_tasktracker = hostname in tt_hosts
-is_nodemanager = hostname in nm_hosts
-is_hbase_rs = hostname in hbase_rs_hosts
-is_flume = hostname in flume_hosts
-is_jn_host = hostname in jn_hosts
-
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_historyserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_tasktracker = not len(tt_hosts) == 0
-has_nodemanager = not len(nm_hosts) == 0
-has_hbase_rs = not len(hbase_rs_hosts) == 0
-has_flume = not len(flume_hosts) == 0
-has_journalnode = not len(jn_hosts) == 0
-
-if System.get_instance().os_family == "suse":
- rrd_py_path = '/srv/www/cgi-bin'
-else:
- rrd_py_path = '/var/www/cgi-bin'
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/status_params.py
deleted file mode 100644
index 3ccad2f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/status_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['global']['ganglia_runtime_dir']
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaClusters.conf.j2
deleted file mode 100644
index f3bb355..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaClusters.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#########################################################
-### ClusterName GmondMasterHost GmondPort ###
-#########################################################
-
- HDPJournalNode {{ganglia_server_host}} 8654
- HDPFlumeServer {{ganglia_server_host}} 8655
- HDPHBaseRegionServer {{ganglia_server_host}} 8656
- HDPNodeManager {{ganglia_server_host}} 8657
- HDPTaskTracker {{ganglia_server_host}} 8658
- HDPDataNode {{ganglia_server_host}} 8659
- HDPSlaves {{ganglia_server_host}} 8660
- HDPNameNode {{ganglia_server_host}} 8661
- HDPJobTracker {{ganglia_server_host}} 8662
- HDPHBaseMaster {{ganglia_server_host}} 8663
- HDPResourceManager {{ganglia_server_host}} 8664
- HDPHistoryServer {{ganglia_server_host}} 8666
-
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaEnv.sh.j2
deleted file mode 100644
index 1ead550..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaEnv.sh.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Unix users and groups for the binaries we start up.
-GMETAD_USER={{gmetad_user}};
-GMOND_USER={{gmond_user}};
-WEBSERVER_GROUP={{webserver_group}};
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaLib.sh.j2
deleted file mode 100644
index 4b5bdd1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaLib.sh.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-GANGLIA_CONF_DIR={{ganglia_conf_dir}};
-GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
-RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
-
-# This file contains all the info about each Ganglia Cluster in our Grid.
-GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
-
-function createDirectory()
-{
- directoryPath=${1};
-
- if [ "x" != "x${directoryPath}" ]
- then
- mkdir -p ${directoryPath};
- fi
-}
-
-function getGangliaClusterInfo()
-{
- clusterName=${1};
-
- if [ "x" != "x${clusterName}" ]
- then
- # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
- awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
- else
- # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
- awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
- fi
-}
-
-function getConfiguredGangliaClusterNames()
-{
- # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only
- # the subdirectory name from each.
- if [ -e ${GANGLIA_CONF_DIR} ]
- then
- find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
- fi
-}
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/global.xml
deleted file mode 100644
index b2c57bd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
- <property>
- <name>hbasemaster_host</name>
- <value></value>
- <description>HBase Master Host.</description>
- </property>
- <property>
- <name>regionserver_hosts</name>
- <value></value>
- <description>Region Server Hosts</description>
- </property>
- <property>
- <name>hbase_log_dir</name>
- <value>/var/log/hbase</value>
- <description>Log Directories for HBase.</description>
- </property>
- <property>
- <name>hbase_pid_dir</name>
- <value>/var/run/hbase</value>
- <description>Log Directories for HBase.</description>
- </property>
- <property>
- <name>hbase_log_dir</name>
- <value>/var/log/hbase</value>
- <description>Log Directories for HBase.</description>
- </property>
- <property>
- <name>hbase_regionserver_heapsize</name>
- <value>1024</value>
- <description>Log Directories for HBase.</description>
- </property>
- <property>
- <name>hbase_master_heapsize</name>
- <value>1024</value>
- <description>HBase Master Heap Size</description>
- </property>
- <property>
- <name>hstore_compactionthreshold</name>
- <value>3</value>
- <description>HBase HStore compaction threshold.</description>
- </property>
- <property>
- <name>hfile_blockcache_size</name>
- <value>0.40</value>
- <description>HFile block cache size.</description>
- </property>
- <property>
- <name>hstorefile_maxsize</name>
- <value>10737418240</value>
- <description>Maximum HStoreFile Size</description>
- </property>
- <property>
- <name>regionserver_handlers</name>
- <value>60</value>
- <description>HBase RegionServer Handler</description>
- </property>
- <property>
- <name>hregion_majorcompaction</name>
- <value>604800000</value>
- <description>The time between major compactions of all HStoreFiles in a region. Set to 0 to disable automated major compactions.</description>
- </property>
- <property>
- <name>hregion_blockmultiplier</name>
- <value>2</value>
- <description>HBase Region Block Multiplier</description>
- </property>
- <property>
- <name>hregion_memstoreflushsize</name>
- <value></value>
- <description>HBase Region MemStore Flush Size.</description>
- </property>
- <property>
- <name>client_scannercaching</name>
- <value>100</value>
- <description>Base Client Scanner Caching</description>
- </property>
- <property>
- <name>zookeeper_sessiontimeout</name>
- <value>30000</value>
- <description>ZooKeeper Session Timeout</description>
- </property>
- <property>
- <name>hfile_max_keyvalue_size</name>
- <value>10485760</value>
- <description>HBase Client Maximum key-value Size</description>
- </property>
- <property>
- <name>hbase_hdfs_root_dir</name>
- <value>/apps/hbase/data</value>
- <description>HBase Relative Path to HDFS.</description>
- </property>
- <property>
- <name>hbase_conf_dir</name>
- <value>/etc/hbase</value>
- <description>Config Directory for HBase.</description>
- </property>
- <property>
- <name>hdfs_enable_shortcircuit_read</name>
- <value>true</value>
- <description>HDFS Short Circuit Read</description>
- </property>
- <property>
- <name>hdfs_support_append</name>
- <value>true</value>
- <description>HDFS append support</description>
- </property>
- <property>
- <name>hstore_blockingstorefiles</name>
- <value>10</value>
- <description>HStore blocking storefiles.</description>
- </property>
- <property>
- <name>regionserver_memstore_lab</name>
- <value>true</value>
- <description>Region Server memstore.</description>
- </property>
- <property>
- <name>regionserver_memstore_lowerlimit</name>
- <value>0.38</value>
- <description>Region Server memstore lower limit.</description>
- </property>
- <property>
- <name>regionserver_memstore_upperlimit</name>
- <value>0.4</value>
- <description>Region Server memstore upper limit.</description>
- </property>
- <property>
- <name>hbase_conf_dir</name>
- <value>/etc/hbase</value>
- <description>HBase conf dir.</description>
- </property>
- <property>
- <name>hbase_user</name>
- <value>hbase</value>
- <description>HBase User Name.</description>
- </property>
-
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-log4j.xml
deleted file mode 100644
index 2258d73..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-log4j.xml
+++ /dev/null
@@ -1,183 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
- <property>
- <name>hbase.root.logger</name>
- <value>INFO,console</value>
- </property>
- <property>
- <name>hbase.security.logger</name>
- <value>INFO,console</value>
- </property>
- <property>
- <name>hbase.log.dir</name>
- <value>.</value>
- </property>
- <property>
- <name>hbase.log.file</name>
- <value>hbase.log</value>
- </property>
- <property>
- <name>log4j.rootLogger</name>
- <value>${hbase.root.logger}</value>
- </property>
- <property>
- <name>log4j.threshold</name>
- <value>ALL</value>
- </property>
- <property>
- <name>log4j.appender.DRFA</name>
- <value>org.apache.log4j.DailyRollingFileAppender</value>
- </property>
- <property>
- <name>log4j.appender.DRFA.File</name>
- <value>${hbase.log.dir}/${hbase.log.file}</value>
- </property>
- <property>
- <name>log4j.appender.DRFA.DatePattern</name>
- <value>.yyyy-MM-dd</value>
- </property>
- <property>
- <name>log4j.appender.DRFA.layout</name>
- <value>org.apache.log4j.PatternLayout</value>
- </property>
- <property>
- <name>log4j.appender.DRFA.layout.ConversionPattern</name>
- <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
- </property>
- <property>
- <name>hbase.log.maxfilesize</name>
- <value>256MB</value>
- </property>
- <property>
- <name>hbase.log.maxbackupindex</name>
- <value>20</value>
- </property>
- <property>
- <name>log4j.appender.RFA</name>
- <value>org.apache.log4j.RollingFileAppender</value>
- </property>
- <property>
- <name>log4j.appender.RFA.File</name>
- <value>${hbase.log.dir}/${hbase.log.file}</value>
- </property>
- <property>
- <name>log4j.appender.RFA.MaxFileSize</name>
- <value>${hbase.log.maxfilesize}</value>
- </property>
- <property>
- <name>log4j.appender.RFA.MaxBackupIndex</name>
- <value>${hbase.log.maxbackupindex}</value>
- </property>
- <property>
- <name>log4j.appender.RFA.layout</name>
- <value>org.apache.log4j.PatternLayout</value>
- </property>
- <property>
- <name>log4j.appender.RFA.layout.ConversionPattern</name>
- <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
- </property>
- <property>
- <name>hbase.security.log.file</name>
- <value>SecurityAuth.audit</value>
- </property>
- <property>
- <name>hbase.security.log.maxfilesize</name>
- <value>256MB</value>
- </property>
- <property>
- <name>hbase.security.log.maxbackupindex</name>
- <value>20</value>
- </property>
- <property>
- <name>log4j.appender.RFAS</name>
- <value>org.apache.log4j.RollingFileAppender</value>
- </property>
- <property>
- <name>log4j.appender.RFAS.File</name>
- <value>${hbase.log.dir}/${hbase.security.log.file}</value>
- </property>
- <property>
- <name>log4j.appender.RFAS.MaxFileSize</name>
- <value>${hbase.security.log.maxfilesize}</value>
- </property>
- <property>
- <name>log4j.appender.RFAS.MaxBackupIndex</name>
- <value>${hbase.security.log.maxbackupindex}</value>
- </property>
- <property>
- <name>log4j.appender.RFAS.layout</name>
- <value>org.apache.log4j.PatternLayout</value>
- </property>
- <property>
- <name>log4j.appender.RFAS.layout.ConversionPattern</name>
- <value>%d{ISO8601} %p %c: %m%n</value>
- </property>
- <property>
- <name>log4j.category.SecurityLogger</name>
- <value>${hbase.security.logger}</value>
- </property>
- <property>
- <name>log4j.additivity.SecurityLogger</name>
- <value>false</value>
- </property>
- <property>
- <name>log4j.appender.NullAppender</name>
- <value>org.apache.log4j.varia.NullAppender</value>
- </property>
- <property>
- <name>log4j.appender.console</name>
- <value>org.apache.log4j.ConsoleAppender</value>
- </property>
- <property>
- <name>log4j.appender.console.target</name>
- <value>System.err</value>
- </property>
- <property>
- <name>log4j.appender.console.layout</name>
- <value>org.apache.log4j.PatternLayout</value>
- </property>
- <property>
- <name>log4j.appender.console.layout.ConversionPattern</name>
- <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
- </property>
- <property>
- <name>log4j.logger.org.apache.zookeeper</name>
- <value>INFO</value>
- </property>
- <property>
- <name>log4j.logger.org.apache.hadoop.hbase</name>
- <value>DEBUG</value>
- </property>
- <property>
- <name>log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil</name>
- <value>INFO</value>
- </property>
- <property>
- <name>log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher</name>
- <value>INFO</value>
- </property>
-
-
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
- <property>
- <name>security.client.protocol.acl</name>
- <value>*</value>
- <description>ACL for HRegionInterface protocol implementations (ie.
- clients talking to HRegionServers)
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.admin.protocol.acl</name>
- <value>*</value>
- <description>ACL for HMasterInterface protocol implementation (ie.
- clients talking to HMaster for admin operations).
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.masterregion.protocol.acl</name>
- <value>*</value>
- <description>ACL for HMasterRegionInterface protocol implementations
- (for HRegionServers communicating with HMaster)
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index d1e933d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,359 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
- <property>
- <name>hbase.rootdir</name>
- <value>hdfs://localhost:8020/apps/hbase/data</value>
- <description>The directory shared by region servers and into
- which HBase persists. The URL should be 'fully-qualified'
- to include the filesystem scheme. For example, to specify the
- HDFS directory '/hbase' where the HDFS instance's namenode is
- running at namenode.example.org on port 9000, set this value to:
- hdfs://namenode.example.org:9000/hbase. By default HBase writes
- into /tmp. Change this configuration else all data will be lost
- on machine restart.
- </description>
- </property>
- <property>
- <name>hbase.cluster.distributed</name>
- <value>true</value>
- <description>The mode the cluster will be in. Possible values are
- false for standalone mode and true for distributed mode. If
- false, startup will run all HBase and ZooKeeper daemons together
- in the one JVM.
- </description>
- </property>
- <property>
- <name>hbase.tmp.dir</name>
- <value>/hadoop/hbase</value>
- <description>Temporary directory on the local filesystem.
- Change this setting to point to a location more permanent
- than '/tmp' (The '/tmp' directory is often cleared on
- machine restart).
- </description>
- </property>
- <property>
- <name>hbase.master.info.bindAddress</name>
- <value></value>
- <description>The bind address for the HBase Master web UI
- </description>
- </property>
- <property>
- <name>hbase.master.info.port</name>
- <value></value>
- <description>The port for the HBase Master web UI.</description>
- </property>
- <property>
- <name>hbase.regionserver.info.port</name>
- <value></value>
- <description>The port for the HBase RegionServer web UI.</description>
- </property>
- <property>
- <name>hbase.regionserver.global.memstore.upperLimit</name>
- <value>0.4</value>
- <description>Maximum size of all memstores in a region server before new
- updates are blocked and flushes are forced. Defaults to 40% of heap
- </description>
- </property>
- <property>
- <name>hbase.regionserver.handler.count</name>
- <value>60</value>
- <description>Count of RPC Listener instances spun up on RegionServers.
- Same property is used by the Master for count of master handlers.
- Default is 10.
- </description>
- </property>
- <property>
- <name>hbase.hregion.majorcompaction</name>
- <value>86400000</value>
- <description>The time (in milliseconds) between 'major' compactions of all
- HStoreFiles in a region. Default: 1 day.
- Set to 0 to disable automated major compactions.
- </description>
- </property>
-
- <property>
- <name>hbase.regionserver.global.memstore.lowerLimit</name>
- <value>0.38</value>
- <description>When memstores are being forced to flush to make room in
- memory, keep flushing until we hit this mark. Defaults to 35% of heap.
- This value equal to hbase.regionserver.global.memstore.upperLimit causes
- the minimum possible flushing to occur when updates are blocked due to
- memstore limiting.
- </description>
- </property>
- <property>
- <name>hbase.hregion.memstore.block.multiplier</name>
- <value>2</value>
- <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
- time hbase.hregion.flush.size bytes. Useful preventing
- runaway memstore during spikes in update traffic. Without an
- upper-bound, memstore fills such that when it flushes the
- resultant flush files take a long time to compact or split, or
- worse, we OOME
- </description>
- </property>
- <property>
- <name>hbase.hregion.memstore.flush.size</name>
- <value>134217728</value>
- <description>
- Memstore will be flushed to disk if size of the memstore
- exceeds this number of bytes. Value is checked by a thread that runs
- every hbase.server.thread.wakefrequency.
- </description>
- </property>
- <property>
- <name>hbase.hregion.memstore.mslab.enabled</name>
- <value>true</value>
- <description>
- Enables the MemStore-Local Allocation Buffer,
- a feature which works to prevent heap fragmentation under
- heavy write loads. This can reduce the frequency of stop-the-world
- GC pauses on large heaps.
- </description>
- </property>
- <property>
- <name>hbase.hregion.max.filesize</name>
- <value>10737418240</value>
- <description>
- Maximum HStoreFile size. If any one of a column families' HStoreFiles has
- grown to exceed this value, the hosting HRegion is split in two.
- Default: 1G.
- </description>
- </property>
- <property>
- <name>hbase.client.scanner.caching</name>
- <value>100</value>
- <description>Number of rows that will be fetched when calling next
- on a scanner if it is not served from (local, client) memory. Higher
- caching values will enable faster scanners but will eat up more memory
- and some calls of next may take longer and longer times when the cache is empty.
- Do not set this value such that the time between invocations is greater
- than the scanner timeout; i.e. hbase.regionserver.lease.period
- </description>
- </property>
- <property>
- <name>zookeeper.session.timeout</name>
- <value>30000</value>
- <description>ZooKeeper session timeout.
- HBase passes this to the zk quorum as suggested maximum time for a
- session (This setting becomes zookeeper's 'maxSessionTimeout'). See
- http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
- "The client sends a requested timeout, the server responds with the
- timeout that it can give the client. " In milliseconds.
- </description>
- </property>
- <property>
- <name>hbase.client.keyvalue.maxsize</name>
- <value>10485760</value>
- <description>Specifies the combined maximum allowed size of a KeyValue
- instance. This is to set an upper boundary for a single entry saved in a
- storage file. Since they cannot be split it helps avoiding that a region
- cannot be split any further because the data is too large. It seems wise
- to set this to a fraction of the maximum region size. Setting it to zero
- or less disables the check.
- </description>
- </property>
- <property>
- <name>hbase.hstore.compactionThreshold</name>
- <value>3</value>
- <description>
- If more than this number of HStoreFiles in any one HStore
- (one HStoreFile is written per flush of memstore) then a compaction
- is run to rewrite all HStoreFiles files as one. Larger numbers
- put off compaction but when it runs, it takes longer to complete.
- </description>
- </property>
- <property>
- <name>hbase.hstore.flush.retries.number</name>
- <value>120</value>
- <description>
- The number of times the region flush operation will be retried.
- </description>
- </property>
-
- <property>
- <name>hbase.hstore.blockingStoreFiles</name>
- <value>10</value>
- <description>
- If more than this number of StoreFiles in any one Store
- (one StoreFile is written per flush of MemStore) then updates are
- blocked for this HRegion until a compaction is completed, or
- until hbase.hstore.blockingWaitTime has been exceeded.
- </description>
- </property>
- <property>
- <name>hfile.block.cache.size</name>
- <value>0.40</value>
- <description>
- Percentage of maximum heap (-Xmx setting) to allocate to block cache
- used by HFile/StoreFile. Default of 0.25 means allocate 25%.
- Set to 0 to disable but it's not recommended.
- </description>
- </property>
-
- <!-- The following properties configure authentication information for
- HBase processes when using Kerberos security. There are no default
- values, included here for documentation purposes -->
- <property>
- <name>hbase.master.keytab.file</name>
- <value></value>
- <description>Full path to the kerberos keytab file to use for logging in
- the configured HMaster server principal.
- </description>
- </property>
- <property>
- <name>hbase.master.kerberos.principal</name>
- <value></value>
- <description>Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
- that should be used to run the HMaster process. The principal name should
- be in the form: user/hostname@DOMAIN. If "_HOST" is used as the hostname
- portion, it will be replaced with the actual hostname of the running
- instance.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.keytab.file</name>
- <value></value>
- <description>Full path to the kerberos keytab file to use for logging in
- the configured HRegionServer server principal.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.kerberos.principal</name>
- <value></value>
- <description>Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
- that should be used to run the HRegionServer process. The principal name
- should be in the form: user/hostname@DOMAIN. If "_HOST" is used as the
- hostname portion, it will be replaced with the actual hostname of the
- running instance. An entry for this principal must exist in the file
- specified in hbase.regionserver.keytab.file
- </description>
- </property>
-
- <!-- Additional configuration specific to HBase security -->
- <property>
- <name>hbase.superuser</name>
- <value>hbase</value>
- <description>List of users or groups (comma-separated), who are allowed
- full privileges, regardless of stored ACLs, across the cluster.
- Only used when HBase security is enabled.
- </description>
- </property>
-
- <property>
- <name>hbase.security.authentication</name>
- <value>simple</value>
- <description> Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
- (no authentication), and 'kerberos'.
- </description>
- </property>
-
- <property>
- <name>hbase.security.authorization</name>
- <value>false</value>
- <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
- </description>
- </property>
-
- <property>
- <name>hbase.coprocessor.region.classes</name>
- <value></value>
- <description>A comma-separated list of Coprocessors that are loaded by
- default on all tables. For any override coprocessor method, these classes
- will be called in order. After implementing your own Coprocessor, just put
- it in HBase's classpath and add the fully qualified class name here.
- A coprocessor can also be loaded on demand by setting HTableDescriptor.
- </description>
- </property>
-
- <property>
- <name>hbase.coprocessor.master.classes</name>
- <value></value>
- <description>A comma-separated list of
- org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
- loaded by default on the active HMaster process. For any implemented
- coprocessor methods, the listed classes will be called in order. After
- implementing your own MasterObserver, just put it in HBase's classpath
- and add the fully qualified class name here.
- </description>
- </property>
-
- <property>
- <name>hbase.zookeeper.property.clientPort</name>
- <value>2181</value>
- <description>Property from ZooKeeper's config zoo.cfg.
- The port at which the clients will connect.
- </description>
- </property>
-
- <!--
- The following three properties are used together to create the list of
- host:peer_port:leader_port quorum servers for ZooKeeper.
- -->
- <property>
- <name>hbase.zookeeper.quorum</name>
- <value>localhost</value>
- <description>Comma separated list of servers in the ZooKeeper Quorum.
- For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
- By default this is set to localhost for local and pseudo-distributed modes
- of operation. For a fully-distributed setup, this should be set to a full
- list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
- this is the list of servers which we will start/stop ZooKeeper on.
- </description>
- </property>
- <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
- <property>
- <name>hbase.zookeeper.useMulti</name>
- <value>true</value>
- <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
- This allows certain ZooKeeper operations to complete more quickly and prevents some issues
- with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
- IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
- and will not be downgraded. ZooKeeper versions before 3.4 do not support multi-update and will
- not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
- </description>
- </property>
- <property>
- <name>zookeeper.znode.parent</name>
- <value>/hbase-unsecure</value>
- <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
- files that are configured with a relative path will go under this node.
- By default, all of HBase's ZooKeeper file path are configured with a
- relative path, so they will all go under this directory unless changed.
- </description>
- </property>
-
- <property>
- <name>hbase.defaults.for.version.skip</name>
- <value>true</value>
- <description>Disables version verification.</description>
- </property>
-
- <property>
- <name>dfs.domain.socket.path</name>
- <value>/var/lib/hadoop-hdfs/dn_socket</value>
- <description>Path to domain socket.</description>
- </property>
-
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metainfo.xml
deleted file mode 100644
index c16b160..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,94 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<metainfo>
- <schemaVersion>2.0</schemaVersion>
- <services>
- <service>
- <name>HBASE</name>
- <comment>Non-relational distributed database and centralized service for configuration management &amp;
- synchronization
- </comment>
- <version>0.96.0.2.1.1</version>
- <components>
- <component>
- <name>HBASE_MASTER</name>
- <category>MASTER</category>
- <commandScript>
- <script>scripts/hbase_master.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>600</timeout>
- </commandScript>
- <customCommands>
- <customCommand>
- <name>DECOMMISSION</name>
- <commandScript>
- <script>scripts/hbase_master.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>600</timeout>
- </commandScript>
- </customCommand>
- </customCommands>
- </component>
-
- <component>
- <name>HBASE_REGIONSERVER</name>
- <category>SLAVE</category>
- <commandScript>
- <script>scripts/hbase_regionserver.py</script>
- <scriptType>PYTHON</scriptType>
- </commandScript>
- </component>
-
- <component>
- <name>HBASE_CLIENT</name>
- <category>CLIENT</category>
- <commandScript>
- <script>scripts/hbase_client.py</script>
- <scriptType>PYTHON</scriptType>
- </commandScript>
- </component>
- </components>
-
- <osSpecifics>
- <osSpecific>
- <osType>any</osType>
- <packages>
- <package>
- <type>rpm</type>
- <name>hbase</name>
- </package>
- </packages>
- </osSpecific>
- </osSpecifics>
-
- <commandScript>
- <script>scripts/service_check.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>300</timeout>
- </commandScript>
-
- <configuration-dependencies>
- <config-type>global</config-type>
- <config-type>hbase-policy</config-type>
- <config-type>hbase-site</config-type>
- <config-type>hbase-log4j</config-type>
- </configuration-dependencies>
-
- </service>
- </services>
-</metainfo>